ngram column — list lengths range from 0 to 82k; the entries below are the texts those n-gram lists cover.
[ "= bot @commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8']) async def example_embed(self, ctx): embed =", "value='Tribute 2 monsters, then target 2 monsters; destroy those targets.", "monsters; destroy those targets. You can only activate 1 \"Sacrosanct", "c8(commands.Cog, name=\"c8\"): def __init__(self, bot: commands.Bot): self.bot = bot @commands.command(name='Sacrosanct_Devouring_Pyre',", "embed = discord.Embed(title='Sacrosanct Devouring Pyre', color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3',", "embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True) embed.add_field(name='Type', value='Trap/Normal', inline=False) embed.add_field(name='Card Effect', value='Tribute", "1 \"Sacrosanct Devouring Pyre\" per turn.', inline=False) embed.set_footer(text='Set Code: ANCF')", "turn.', inline=False) embed.set_footer(text='Set Code: ANCF') await ctx.send(embed=embed) def setup(bot: commands.Bot):", "Devouring Pyre\" per turn.', inline=False) embed.set_footer(text='Set Code: ANCF') await ctx.send(embed=embed)", "targets. You can only activate 1 \"Sacrosanct Devouring Pyre\" per", "embed.add_field(name='Card Effect', value='Tribute 2 monsters, then target 2 monsters; destroy", "get class c8(commands.Cog, name=\"c8\"): def __init__(self, bot: commands.Bot): self.bot =", "from discord.ext import commands from discord.utils import get class c8(commands.Cog,", "2 monsters, then target 2 monsters; destroy those targets. You", "2 monsters; destroy those targets. You can only activate 1", "class c8(commands.Cog, name=\"c8\"): def __init__(self, bot: commands.Bot): self.bot = bot", "monsters, then target 2 monsters; destroy those targets. You can", "discord.ext import commands from discord.utils import get class c8(commands.Cog, name=\"c8\"):", "async def example_embed(self, ctx): embed = discord.Embed(title='Sacrosanct Devouring Pyre', color=0xBC5A84)", "bot: commands.Bot): self.bot = bot @commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8']) async def example_embed(self,", "discord from discord.ext import commands from discord.utils import get class", "import commands from discord.utils import get class c8(commands.Cog, name=\"c8\"): def", "import get class c8(commands.Cog, name=\"c8\"): def __init__(self, bot: commands.Bot): self.bot", "aliases=['c8']) async def example_embed(self, ctx): embed = discord.Embed(title='Sacrosanct Devouring Pyre',", "discord.Embed(title='Sacrosanct Devouring Pyre', color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True) embed.add_field(name='Type',", "from discord.utils import get class c8(commands.Cog, name=\"c8\"): def __init__(self, bot:", "value='Trap/Normal', inline=False) embed.add_field(name='Card Effect', value='Tribute 2 monsters, then target 2", "inline=False) embed.add_field(name='Card Effect', value='Tribute 2 monsters, then target 2 monsters;", "<gh_stars>0 import discord from discord.ext import commands from discord.utils import", "then target 2 monsters; destroy those targets. 
You can only", "You can only activate 1 \"Sacrosanct Devouring Pyre\" per turn.',", "import discord from discord.ext import commands from discord.utils import get", "bot @commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8']) async def example_embed(self, ctx): embed = discord.Embed(title='Sacrosanct", "activate 1 \"Sacrosanct Devouring Pyre\" per turn.', inline=False) embed.set_footer(text='Set Code:", "inline=False) embed.set_footer(text='Set Code: ANCF') await ctx.send(embed=embed) def setup(bot: commands.Bot): bot.add_cog(c8(bot))", "inline=True) embed.add_field(name='Type', value='Trap/Normal', inline=False) embed.add_field(name='Card Effect', value='Tribute 2 monsters, then", "@commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8']) async def example_embed(self, ctx): embed = discord.Embed(title='Sacrosanct Devouring", "self.bot = bot @commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8']) async def example_embed(self, ctx): embed", "embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True) embed.add_field(name='Type', value='Trap/Normal', inline=False) embed.add_field(name='Card Effect',", "target 2 monsters; destroy those targets. You can only activate", "= discord.Embed(title='Sacrosanct Devouring Pyre', color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)", "Pyre\" per turn.', inline=False) embed.set_footer(text='Set Code: ANCF') await ctx.send(embed=embed) def", "value='Casual:3/Tournament:3', inline=True) embed.add_field(name='Type', value='Trap/Normal', inline=False) embed.add_field(name='Card Effect', value='Tribute 2 monsters,", "name=\"c8\"): def __init__(self, bot: commands.Bot): self.bot = bot @commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8'])", "destroy those targets. You can only activate 1 \"Sacrosanct Devouring", "commands from discord.utils import get class c8(commands.Cog, name=\"c8\"): def __init__(self,", "ctx): embed = discord.Embed(title='Sacrosanct Devouring Pyre', color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status (Archetype)',", "commands.Bot): self.bot = bot @commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8']) async def example_embed(self, ctx):", "color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True) embed.add_field(name='Type', value='Trap/Normal', inline=False) embed.add_field(name='Card", "can only activate 1 \"Sacrosanct Devouring Pyre\" per turn.', inline=False)", "Devouring Pyre', color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True) embed.add_field(name='Type', value='Trap/Normal',", "embed.add_field(name='Type', value='Trap/Normal', inline=False) embed.add_field(name='Card Effect', value='Tribute 2 monsters, then target", "per turn.', inline=False) embed.set_footer(text='Set Code: ANCF') await ctx.send(embed=embed) def setup(bot:", "those targets. 
You can only activate 1 \"Sacrosanct Devouring Pyre\"", "example_embed(self, ctx): embed = discord.Embed(title='Sacrosanct Devouring Pyre', color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status", "only activate 1 \"Sacrosanct Devouring Pyre\" per turn.', inline=False) embed.set_footer(text='Set", "Pyre', color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg') embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True) embed.add_field(name='Type', value='Trap/Normal', inline=False)", "discord.utils import get class c8(commands.Cog, name=\"c8\"): def __init__(self, bot: commands.Bot):", "def example_embed(self, ctx): embed = discord.Embed(title='Sacrosanct Devouring Pyre', color=0xBC5A84) embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2308475.jpg')", "(Archetype)', value='Casual:3/Tournament:3', inline=True) embed.add_field(name='Type', value='Trap/Normal', inline=False) embed.add_field(name='Card Effect', value='Tribute 2", "\"Sacrosanct Devouring Pyre\" per turn.', inline=False) embed.set_footer(text='Set Code: ANCF') await", "def __init__(self, bot: commands.Bot): self.bot = bot @commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8']) async", "Effect', value='Tribute 2 monsters, then target 2 monsters; destroy those", "__init__(self, bot: commands.Bot): self.bot = bot @commands.command(name='Sacrosanct_Devouring_Pyre', aliases=['c8']) async def" ]
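If the cog above lives in its own module, a launcher along these lines would register it. This is a minimal sketch assuming discord.py 1.x (consistent with the synchronous setup() above); the cogs.c8 module path and the bot token are placeholder assumptions.

# Minimal launcher sketch (discord.py 1.x assumed); cogs/c8.py and the token
# string are placeholders, not part of the original entry.
from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.load_extension("cogs.c8")   # calls setup(), which registers the c8 cog
bot.run("YOUR_BOT_TOKEN")       # then !c8 or !Sacrosanct_Devouring_Pyre sends the embed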
[ "x = df[\"Text\"] y = df[\"PublicationTitle\"] train_x, test_x, train_y, test_y", "into numerical values the model can understand encoder = LabelEncoder()", "training dataset to the classifier print(\"TRAINING THE MODEL\") SVM =", "= pd.read_csv(\"preprocessed.csv\", index_col=0) print(df.head()) print(\"SPLITTING TRAIN-TEST\") x = df[\"Text\"] y", "a collection of text documents into numerical feature vectors #", "accuracy = accuracy_score(test_y, predictions) # print(\"Accuracy:\", str(accuracy * 100) +", "model_selection, svm from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder", "predictions = SVM.predict(test_x_tfidf) # # Calculate accuracy score # accuracy", "StratifiedKFold(n_splits=10, shuffle=True, random_state=1) accuracies = [] fold = 1 for", "classification. I am using Stratified K-Fold Cross Validation to prevent", "= encoder.fit_transform(test_y) # Word vectorization # turning a collection of", "classifier print(\"TRAINING THE MODEL\") SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')", "x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx] y_train_fold, y_test_fold = y[train_idx], y[test_idx] SVM.fit(x_train_fold,", "encoder.fit_transform(train_y) # test_y = encoder.fit_transform(test_y) # Word vectorization # turning", "np.min(accuracies)) print(\"Mean of Accuracies:\", np.mean(accuracies)) print(\"STD of Accuracies:\", np.std(accuracies)) #", "fold = 1 for train_idx, test_idx in skf.split(x, y): print(\"Working", "encode the target variable to transform categorical data of string", "df[\"PublicationTitle\"], test_size=0.3) # Label encode the target variable to transform", "perform the classification. I am using Stratified K-Fold Cross Validation", "train_idx, test_idx in skf.split(x, y): print(\"Working on fold\", fold) x_train_fold,", "sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction.text import", "# train_x_tfidf = tfidf_vect.transform(train_x) # test_x_tfidf = tfidf_vect.transform(test_x) x_tfidf =", "on fold\", fold) x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx] y_train_fold, y_test_fold", "y[test_idx] SVM.fit(x_train_fold, y_train_fold) acc = SVM.score(x_test_fold, y_test_fold) print(\"Acc\", fold, \":\",", "as np import pandas as pd from sklearn import model_selection,", "y): print(\"Working on fold\", fold) x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx]", "= SVM.predict(test_x_tfidf) # # Calculate accuracy score # accuracy =", "print(tfidf_vect.vocabulary_) # Fit the training dataset to the classifier print(\"TRAINING", "svm from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder from", "SVM model to perform the classification. 
I am using Stratified", "numpy as np import pandas as pd from sklearn import", "Label encode the target variable to transform categorical data of", "np.std(accuracies)) # print(\"RUNNING TEST PREDICTIONS\") # predictions = SVM.predict(test_x_tfidf) #", "Term Frequency - Inverse Document tfidf_vect = TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"]) #", "# accuracy = accuracy_score(test_y, predictions) # print(\"Accuracy:\", str(accuracy * 100)", "TRAIN-TEST\") x = df[\"Text\"] y = df[\"PublicationTitle\"] train_x, test_x, train_y,", "understand encoder = LabelEncoder() # train_y = encoder.fit_transform(train_y) # test_y", "# test_x_tfidf = tfidf_vect.transform(test_x) x_tfidf = tfidf_vect.transform(df[\"Text\"]) y = encoder.fit_transform(y)", "skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1) accuracies = [] fold =", "text documents into numerical feature vectors # We are using", "test_y = model_selection.train_test_split( df[\"Text\"], df[\"PublicationTitle\"], test_size=0.3) # Label encode the", "svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto') skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1) accuracies", "the training dataset to the classifier print(\"TRAINING THE MODEL\") SVM", "sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import", "# predictions = SVM.predict(test_x_tfidf) # # Calculate accuracy score #", "# turning a collection of text documents into numerical feature", "encoder = LabelEncoder() # train_y = encoder.fit_transform(train_y) # test_y =", "of text documents into numerical feature vectors # We are", "accuracy score # accuracy = accuracy_score(test_y, predictions) # print(\"Accuracy:\", str(accuracy", "\"\"\" import numpy as np import pandas as pd from", "transform categorical data of string # type into numerical values", "tfidf_vect.transform(df[\"Text\"]) y = encoder.fit_transform(y) # print(tfidf_vect.vocabulary_) # Fit the training", "encoder.fit_transform(y) # print(tfidf_vect.vocabulary_) # Fit the training dataset to the", "# We are using Term Frequency - Inverse Document tfidf_vect", "tfidf_vect.transform(train_x) # test_x_tfidf = tfidf_vect.transform(test_x) x_tfidf = tfidf_vect.transform(df[\"Text\"]) y =", "MODEL\") SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto') skf = StratifiedKFold(n_splits=10,", "REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\" import numpy as np import pandas as", "fold\", fold) x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx] y_train_fold, y_test_fold =", "train_x_tfidf = tfidf_vect.transform(train_x) # test_x_tfidf = tfidf_vect.transform(test_x) x_tfidf = tfidf_vect.transform(df[\"Text\"])", "from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import StratifiedKFold # Open", "test_x, train_y, test_y = model_selection.train_test_split( df[\"Text\"], df[\"PublicationTitle\"], test_size=0.3) # Label", "THE MODEL\") SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto') skf =", "TfidfVectorizer from sklearn.model_selection import StratifiedKFold # Open preproccessed csv df", "values the model can understand encoder = LabelEncoder() # train_y", "the preprocessed data is used to train the SVM model", "y_test_fold = y[train_idx], y[test_idx] SVM.fit(x_train_fold, y_train_fold) acc = SVM.score(x_test_fold, y_test_fold)", "df = 
pd.read_csv(\"preprocessed.csv\", index_col=0) print(df.head()) print(\"SPLITTING TRAIN-TEST\") x = df[\"Text\"]", "This script is where the preprocessed data is used to", "numerical values the model can understand encoder = LabelEncoder() #", "print(\"RUNNING TEST PREDICTIONS\") # predictions = SVM.predict(test_x_tfidf) # # Calculate", "= SVM.score(x_test_fold, y_test_fold) print(\"Acc\", fold, \":\", acc) accuracies.append(acc) fold +=", "am using Stratified K-Fold Cross Validation to prevent bias and/or", "using Stratified K-Fold Cross Validation to prevent bias and/or any", "into numerical feature vectors # We are using Term Frequency", "from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction.text", "Accuracies:\", np.mean(accuracies)) print(\"STD of Accuracies:\", np.std(accuracies)) # print(\"RUNNING TEST PREDICTIONS\")", "tfidf_vect = TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"]) # train_x_tfidf = tfidf_vect.transform(train_x) # test_x_tfidf", "Stratified K-Fold Cross Validation to prevent bias and/or any imbalance", "are using Term Frequency - Inverse Document tfidf_vect = TfidfVectorizer(max_features=5000)", "Document tfidf_vect = TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"]) # train_x_tfidf = tfidf_vect.transform(train_x) #", "pandas as pd from sklearn import model_selection, svm from sklearn.metrics", "We are using Term Frequency - Inverse Document tfidf_vect =", "pd.read_csv(\"preprocessed.csv\", index_col=0) print(df.head()) print(\"SPLITTING TRAIN-TEST\") x = df[\"Text\"] y =", "# Fit the training dataset to the classifier print(\"TRAINING THE", "# Word vectorization # turning a collection of text documents", "any imbalance that could affect the model's accuracy. REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34", "print(\"Acc\", fold, \":\", acc) accuracies.append(acc) fold += 1 print(\"ACCURACIES:\", accuracies)", "can understand encoder = LabelEncoder() # train_y = encoder.fit_transform(train_y) #", "I am using Stratified K-Fold Cross Validation to prevent bias", "= encoder.fit_transform(y) # print(tfidf_vect.vocabulary_) # Fit the training dataset to", "from sklearn import model_selection, svm from sklearn.metrics import accuracy_score from", "# type into numerical values the model can understand encoder", "fold += 1 print(\"ACCURACIES:\", accuracies) print(\"Max Accuracy:\", np.max(accuracies)) print(\"Min Accuracy:\",", "accuracy_score from sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction.text import TfidfVectorizer from", "= LabelEncoder() # train_y = encoder.fit_transform(train_y) # test_y = encoder.fit_transform(test_y)", "import accuracy_score from sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction.text import TfidfVectorizer", "# test_y = encoder.fit_transform(test_y) # Word vectorization # turning a", "= [] fold = 1 for train_idx, test_idx in skf.split(x,", "train the SVM model to perform the classification. 
I am", "from sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection", "\":\", acc) accuracies.append(acc) fold += 1 print(\"ACCURACIES:\", accuracies) print(\"Max Accuracy:\",", "skf.split(x, y): print(\"Working on fold\", fold) x_train_fold, x_test_fold = x_tfidf[train_idx],", "data of string # type into numerical values the model", "= svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto') skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)", "Accuracies:\", np.std(accuracies)) # print(\"RUNNING TEST PREDICTIONS\") # predictions = SVM.predict(test_x_tfidf)", "csv df = pd.read_csv(\"preprocessed.csv\", index_col=0) print(df.head()) print(\"SPLITTING TRAIN-TEST\") x =", "data is used to train the SVM model to perform", "# print(tfidf_vect.vocabulary_) # Fit the training dataset to the classifier", "as pd from sklearn import model_selection, svm from sklearn.metrics import", "score # accuracy = accuracy_score(test_y, predictions) # print(\"Accuracy:\", str(accuracy *", "accuracies = [] fold = 1 for train_idx, test_idx in", "Cross Validation to prevent bias and/or any imbalance that could", "script is where the preprocessed data is used to train", "# Open preproccessed csv df = pd.read_csv(\"preprocessed.csv\", index_col=0) print(df.head()) print(\"SPLITTING", "print(\"Min Accuracy:\", np.min(accuracies)) print(\"Mean of Accuracies:\", np.mean(accuracies)) print(\"STD of Accuracies:\",", "train_x, test_x, train_y, test_y = model_selection.train_test_split( df[\"Text\"], df[\"PublicationTitle\"], test_size=0.3) #", "= StratifiedKFold(n_splits=10, shuffle=True, random_state=1) accuracies = [] fold = 1", "is where the preprocessed data is used to train the", "import TfidfVectorizer from sklearn.model_selection import StratifiedKFold # Open preproccessed csv", "tfidf_vect.transform(test_x) x_tfidf = tfidf_vect.transform(df[\"Text\"]) y = encoder.fit_transform(y) # print(tfidf_vect.vocabulary_) #", "preproccessed csv df = pd.read_csv(\"preprocessed.csv\", index_col=0) print(df.head()) print(\"SPLITTING TRAIN-TEST\") x", "vectorization # turning a collection of text documents into numerical", "encoder.fit_transform(test_y) # Word vectorization # turning a collection of text", "dataset to the classifier print(\"TRAINING THE MODEL\") SVM = svm.SVC(C=1.0,", "random_state=1) accuracies = [] fold = 1 for train_idx, test_idx", "print(df.head()) print(\"SPLITTING TRAIN-TEST\") x = df[\"Text\"] y = df[\"PublicationTitle\"] train_x,", "fold, \":\", acc) accuracies.append(acc) fold += 1 print(\"ACCURACIES:\", accuracies) print(\"Max", "the model's accuracy. REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\" import numpy as np", "import model_selection, svm from sklearn.metrics import accuracy_score from sklearn.preprocessing import", "test_size=0.3) # Label encode the target variable to transform categorical", "- Inverse Document tfidf_vect = TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"]) # train_x_tfidf =", "print(\"Mean of Accuracies:\", np.mean(accuracies)) print(\"STD of Accuracies:\", np.std(accuracies)) # print(\"RUNNING", "to the classifier print(\"TRAINING THE MODEL\") SVM = svm.SVC(C=1.0, kernel='linear',", "that could affect the model's accuracy. 
REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\" import", "is used to train the SVM model to perform the", "accuracy. REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\" import numpy as np import pandas", "train_y, test_y = model_selection.train_test_split( df[\"Text\"], df[\"PublicationTitle\"], test_size=0.3) # Label encode", "print(\"ACCURACIES:\", accuracies) print(\"Max Accuracy:\", np.max(accuracies)) print(\"Min Accuracy:\", np.min(accuracies)) print(\"Mean of", "= y[train_idx], y[test_idx] SVM.fit(x_train_fold, y_train_fold) acc = SVM.score(x_test_fold, y_test_fold) print(\"Acc\",", "the classifier print(\"TRAINING THE MODEL\") SVM = svm.SVC(C=1.0, kernel='linear', degree=3,", "and/or any imbalance that could affect the model's accuracy. REFERENCE:", "Calculate accuracy score # accuracy = accuracy_score(test_y, predictions) # print(\"Accuracy:\",", "numerical feature vectors # We are using Term Frequency -", "PREDICTIONS\") # predictions = SVM.predict(test_x_tfidf) # # Calculate accuracy score", "collection of text documents into numerical feature vectors # We", "x_tfidf[train_idx], x_tfidf[test_idx] y_train_fold, y_test_fold = y[train_idx], y[test_idx] SVM.fit(x_train_fold, y_train_fold) acc", "fold) x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx] y_train_fold, y_test_fold = y[train_idx],", "model can understand encoder = LabelEncoder() # train_y = encoder.fit_transform(train_y)", "y = df[\"PublicationTitle\"] train_x, test_x, train_y, test_y = model_selection.train_test_split( df[\"Text\"],", "to perform the classification. I am using Stratified K-Fold Cross", "acc) accuracies.append(acc) fold += 1 print(\"ACCURACIES:\", accuracies) print(\"Max Accuracy:\", np.max(accuracies))", "= 1 for train_idx, test_idx in skf.split(x, y): print(\"Working on", "could affect the model's accuracy. 
REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\" import numpy", "from sklearn.model_selection import StratifiedKFold # Open preproccessed csv df =", "to prevent bias and/or any imbalance that could affect the", "to transform categorical data of string # type into numerical", "tfidf_vect.fit(df[\"Text\"]) # train_x_tfidf = tfidf_vect.transform(train_x) # test_x_tfidf = tfidf_vect.transform(test_x) x_tfidf", "import StratifiedKFold # Open preproccessed csv df = pd.read_csv(\"preprocessed.csv\", index_col=0)", "# Label encode the target variable to transform categorical data", "test_x_tfidf = tfidf_vect.transform(test_x) x_tfidf = tfidf_vect.transform(df[\"Text\"]) y = encoder.fit_transform(y) #", "Validation to prevent bias and/or any imbalance that could affect", "of string # type into numerical values the model can", "for train_idx, test_idx in skf.split(x, y): print(\"Working on fold\", fold)", "LabelEncoder() # train_y = encoder.fit_transform(train_y) # test_y = encoder.fit_transform(test_y) #", "vectors # We are using Term Frequency - Inverse Document", "categorical data of string # type into numerical values the", "pd from sklearn import model_selection, svm from sklearn.metrics import accuracy_score", "https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\" import numpy as np import pandas as pd", "sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import StratifiedKFold # Open preproccessed", "accuracies) print(\"Max Accuracy:\", np.max(accuracies)) print(\"Min Accuracy:\", np.min(accuracies)) print(\"Mean of Accuracies:\",", "y_test_fold) print(\"Acc\", fold, \":\", acc) accuracies.append(acc) fold += 1 print(\"ACCURACIES:\",", "# # Calculate accuracy score # accuracy = accuracy_score(test_y, predictions)", "np.max(accuracies)) print(\"Min Accuracy:\", np.min(accuracies)) print(\"Mean of Accuracies:\", np.mean(accuracies)) print(\"STD of", "SVM.fit(x_train_fold, y_train_fold) acc = SVM.score(x_test_fold, y_test_fold) print(\"Acc\", fold, \":\", acc)", "SVM.predict(test_x_tfidf) # # Calculate accuracy score # accuracy = accuracy_score(test_y,", "df[\"PublicationTitle\"] train_x, test_x, train_y, test_y = model_selection.train_test_split( df[\"Text\"], df[\"PublicationTitle\"], test_size=0.3)", "Open preproccessed csv df = pd.read_csv(\"preprocessed.csv\", index_col=0) print(df.head()) print(\"SPLITTING TRAIN-TEST\")", "print(\"TRAINING THE MODEL\") SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto') skf", "the target variable to transform categorical data of string #", "gamma='auto') skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1) accuracies = [] fold", "import LabelEncoder from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import StratifiedKFold", "1 print(\"ACCURACIES:\", accuracies) print(\"Max Accuracy:\", np.max(accuracies)) print(\"Min Accuracy:\", np.min(accuracies)) print(\"Mean", "x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx] y_train_fold, y_test_fold = y[train_idx], y[test_idx]", "model_selection.train_test_split( df[\"Text\"], df[\"PublicationTitle\"], test_size=0.3) # Label encode the target variable", "target variable to transform categorical data of string # type", "StratifiedKFold # Open preproccessed csv df = pd.read_csv(\"preprocessed.csv\", index_col=0) print(df.head())", "= 
tfidf_vect.transform(test_x) x_tfidf = tfidf_vect.transform(df[\"Text\"]) y = encoder.fit_transform(y) # print(tfidf_vect.vocabulary_)", "= tfidf_vect.transform(df[\"Text\"]) y = encoder.fit_transform(y) # print(tfidf_vect.vocabulary_) # Fit the", "affect the model's accuracy. REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\" import numpy as", "variable to transform categorical data of string # type into", "SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto') skf = StratifiedKFold(n_splits=10, shuffle=True,", "bias and/or any imbalance that could affect the model's accuracy.", "import pandas as pd from sklearn import model_selection, svm from", "using Term Frequency - Inverse Document tfidf_vect = TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"])", "feature vectors # We are using Term Frequency - Inverse", "= tfidf_vect.transform(train_x) # test_x_tfidf = tfidf_vect.transform(test_x) x_tfidf = tfidf_vect.transform(df[\"Text\"]) y", "= encoder.fit_transform(train_y) # test_y = encoder.fit_transform(test_y) # Word vectorization #", "of Accuracies:\", np.mean(accuracies)) print(\"STD of Accuracies:\", np.std(accuracies)) # print(\"RUNNING TEST", "prevent bias and/or any imbalance that could affect the model's", "y_train_fold) acc = SVM.score(x_test_fold, y_test_fold) print(\"Acc\", fold, \":\", acc) accuracies.append(acc)", "y = encoder.fit_transform(y) # print(tfidf_vect.vocabulary_) # Fit the training dataset", "Inverse Document tfidf_vect = TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"]) # train_x_tfidf = tfidf_vect.transform(train_x)", "= df[\"PublicationTitle\"] train_x, test_x, train_y, test_y = model_selection.train_test_split( df[\"Text\"], df[\"PublicationTitle\"],", "df[\"Text\"], df[\"PublicationTitle\"], test_size=0.3) # Label encode the target variable to", "TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"]) # train_x_tfidf = tfidf_vect.transform(train_x) # test_x_tfidf = tfidf_vect.transform(test_x)", "acc = SVM.score(x_test_fold, y_test_fold) print(\"Acc\", fold, \":\", acc) accuracies.append(acc) fold", "test_idx in skf.split(x, y): print(\"Working on fold\", fold) x_train_fold, x_test_fold", "= accuracy_score(test_y, predictions) # print(\"Accuracy:\", str(accuracy * 100) + \"%\")", "np import pandas as pd from sklearn import model_selection, svm", "model to perform the classification. I am using Stratified K-Fold", "= TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"]) # train_x_tfidf = tfidf_vect.transform(train_x) # test_x_tfidf =", "# train_y = encoder.fit_transform(train_y) # test_y = encoder.fit_transform(test_y) # Word", "[] fold = 1 for train_idx, test_idx in skf.split(x, y):", "y[train_idx], y[test_idx] SVM.fit(x_train_fold, y_train_fold) acc = SVM.score(x_test_fold, y_test_fold) print(\"Acc\", fold,", "shuffle=True, random_state=1) accuracies = [] fold = 1 for train_idx,", "to train the SVM model to perform the classification. I", "df[\"Text\"] y = df[\"PublicationTitle\"] train_x, test_x, train_y, test_y = model_selection.train_test_split(", "the SVM model to perform the classification. 
I am using", "documents into numerical feature vectors # We are using Term", "x_tfidf[test_idx] y_train_fold, y_test_fold = y[train_idx], y[test_idx] SVM.fit(x_train_fold, y_train_fold) acc =", "accuracies.append(acc) fold += 1 print(\"ACCURACIES:\", accuracies) print(\"Max Accuracy:\", np.max(accuracies)) print(\"Min", "Fit the training dataset to the classifier print(\"TRAINING THE MODEL\")", "where the preprocessed data is used to train the SVM", "kernel='linear', degree=3, gamma='auto') skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1) accuracies =", "# Calculate accuracy score # accuracy = accuracy_score(test_y, predictions) #", "\"\"\" This script is where the preprocessed data is used", "1 for train_idx, test_idx in skf.split(x, y): print(\"Working on fold\",", "Accuracy:\", np.max(accuracies)) print(\"Min Accuracy:\", np.min(accuracies)) print(\"Mean of Accuracies:\", np.mean(accuracies)) print(\"STD", "print(\"Working on fold\", fold) x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx] y_train_fold,", "print(\"Max Accuracy:\", np.max(accuracies)) print(\"Min Accuracy:\", np.min(accuracies)) print(\"Mean of Accuracies:\", np.mean(accuracies))", "model's accuracy. REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\" import numpy as np import", "TEST PREDICTIONS\") # predictions = SVM.predict(test_x_tfidf) # # Calculate accuracy", "# print(\"RUNNING TEST PREDICTIONS\") # predictions = SVM.predict(test_x_tfidf) # #", "+= 1 print(\"ACCURACIES:\", accuracies) print(\"Max Accuracy:\", np.max(accuracies)) print(\"Min Accuracy:\", np.min(accuracies))", "degree=3, gamma='auto') skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1) accuracies = []", "sklearn.model_selection import StratifiedKFold # Open preproccessed csv df = pd.read_csv(\"preprocessed.csv\",", "string # type into numerical values the model can understand", "of Accuracies:\", np.std(accuracies)) # print(\"RUNNING TEST PREDICTIONS\") # predictions =", "imbalance that could affect the model's accuracy. 
REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34 \"\"\"", "Word vectorization # turning a collection of text documents into", "test_y = encoder.fit_transform(test_y) # Word vectorization # turning a collection", "SVM.score(x_test_fold, y_test_fold) print(\"Acc\", fold, \":\", acc) accuracies.append(acc) fold += 1", "x_tfidf = tfidf_vect.transform(df[\"Text\"]) y = encoder.fit_transform(y) # print(tfidf_vect.vocabulary_) # Fit", "preprocessed data is used to train the SVM model to", "print(\"STD of Accuracies:\", np.std(accuracies)) # print(\"RUNNING TEST PREDICTIONS\") # predictions", "the model can understand encoder = LabelEncoder() # train_y =", "train_y = encoder.fit_transform(train_y) # test_y = encoder.fit_transform(test_y) # Word vectorization", "import numpy as np import pandas as pd from sklearn", "type into numerical values the model can understand encoder =", "np.mean(accuracies)) print(\"STD of Accuracies:\", np.std(accuracies)) # print(\"RUNNING TEST PREDICTIONS\") #", "= x_tfidf[train_idx], x_tfidf[test_idx] y_train_fold, y_test_fold = y[train_idx], y[test_idx] SVM.fit(x_train_fold, y_train_fold)", "LabelEncoder from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import StratifiedKFold #", "index_col=0) print(df.head()) print(\"SPLITTING TRAIN-TEST\") x = df[\"Text\"] y = df[\"PublicationTitle\"]", "sklearn import model_selection, svm from sklearn.metrics import accuracy_score from sklearn.preprocessing", "print(\"SPLITTING TRAIN-TEST\") x = df[\"Text\"] y = df[\"PublicationTitle\"] train_x, test_x,", "turning a collection of text documents into numerical feature vectors", "y_train_fold, y_test_fold = y[train_idx], y[test_idx] SVM.fit(x_train_fold, y_train_fold) acc = SVM.score(x_test_fold,", "= model_selection.train_test_split( df[\"Text\"], df[\"PublicationTitle\"], test_size=0.3) # Label encode the target", "Frequency - Inverse Document tfidf_vect = TfidfVectorizer(max_features=5000) tfidf_vect.fit(df[\"Text\"]) # train_x_tfidf", "K-Fold Cross Validation to prevent bias and/or any imbalance that", "Accuracy:\", np.min(accuracies)) print(\"Mean of Accuracies:\", np.mean(accuracies)) print(\"STD of Accuracies:\", np.std(accuracies))", "in skf.split(x, y): print(\"Working on fold\", fold) x_train_fold, x_test_fold =", "the classification. I am using Stratified K-Fold Cross Validation to", "= df[\"Text\"] y = df[\"PublicationTitle\"] train_x, test_x, train_y, test_y =", "used to train the SVM model to perform the classification." ]
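For a self-contained illustration of the cross-validation pattern the docstring describes, the sketch below runs TF-IDF plus a linear SVM over a toy corpus. The corpus, the 3-fold setting and the per-fold vectorizer fitting (which avoids leaking test-fold vocabulary, unlike the whole-corpus fit in the script above) are all assumptions made for this example.

# Minimal sketch: TF-IDF + linear SVM scored with Stratified K-Fold on toy data.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn import svm

texts = np.array(["stocks rally on earnings", "team wins title game",
                  "markets slide after report", "striker scores twice",
                  "central bank holds rates", "coach praises defense"])
labels = LabelEncoder().fit_transform(
    ["finance", "sports", "finance", "sports", "finance", "sports"])

skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=1)
scores = []
for train_idx, test_idx in skf.split(texts, labels):
    vect = TfidfVectorizer()                       # fit on the training fold only
    x_train = vect.fit_transform(texts[train_idx])
    x_test = vect.transform(texts[test_idx])
    clf = svm.SVC(C=1.0, kernel="linear", gamma="auto")
    clf.fit(x_train, labels[train_idx])
    scores.append(clf.score(x_test, labels[test_idx]))

print("mean accuracy:", np.mean(scores))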
[ "EntryPoint from red_dwarf.project import provide_project_context, ProjectContext class InitProject(EntryPoint): @provide_project_context def", "red_dwarf.project import provide_project_context, ProjectContext class InitProject(EntryPoint): @provide_project_context def run(self, project_context:", "import provide_project_context, ProjectContext class InitProject(EntryPoint): @provide_project_context def run(self, project_context: ProjectContext)", "class InitProject(EntryPoint): @provide_project_context def run(self, project_context: ProjectContext) -> None: project_context.init_project()", "rsterm import EntryPoint from red_dwarf.project import provide_project_context, ProjectContext class InitProject(EntryPoint):", "<gh_stars>0 from rsterm import EntryPoint from red_dwarf.project import provide_project_context, ProjectContext", "import EntryPoint from red_dwarf.project import provide_project_context, ProjectContext class InitProject(EntryPoint): @provide_project_context", "ProjectContext class InitProject(EntryPoint): @provide_project_context def run(self, project_context: ProjectContext) -> None:", "from red_dwarf.project import provide_project_context, ProjectContext class InitProject(EntryPoint): @provide_project_context def run(self,", "provide_project_context, ProjectContext class InitProject(EntryPoint): @provide_project_context def run(self, project_context: ProjectContext) ->", "from rsterm import EntryPoint from red_dwarf.project import provide_project_context, ProjectContext class" ]
[ "do not have any actual bases, but only the '*'", "threshold. # However, the total coverage also accounts for the", "# summary_file = output_prefix + '.conssum'; summary_file = \"%s-cov_%d.variant.sum\" %", "\"chromosome:start-end\"\\n\\n' ) exit(1) reference_file = sys.argv[1] coverage_threshold = int(sys.argv[2]) output_prefix", "= [] if output_prefix == \"-\": output_prefix = os.path.splitext(sam_file)[0] main(sam_file,", "= 0 insertion_unique = False # Sanity check, just to", "continue if use_bed == True: line_split = line.strip().split(\"\\t\") if len(line_split)", ") def CollectSummaries( sam_files, prefix_for_intermediate_results, collective_output_file ): fp_collect = None", "CollectSummaries( sam_files, prefix_for_intermediate_results, collective_output_file ): fp_collect = None try: fp_collect", "+= most_common_insertion_length # variant_line = 'insertion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts),", "encodes two # symbols: '^' marking the read start and", "this base didn't get skipped because of a deletion #", "were any deletions (to avoid index out of bounds error).", "% alignments_path_bam), coverage_threshold, output_prefix, thread_id, bed_position, ) def CollectSummaries( sam_files,", "deletion_count in sorted_deletion_counts ] ) == 1 ) else False", "I compared the total coverage of the current base with", "> coverage_threshold: # Sanity check, just to see if there", "len(split_line) < 5 or len(split_line) > 6: sys.stderr.write(line + \"\\n\")", "(output_prefix, coverage_threshold) fp_variant = open(variant_file, \"w\") vcf_file = \"%s-cov_%d.variant.vcf\" %", "0 if (use_bed == False) else max((bed_pos_start - 10), 0)", "string specifying \"chromosome:start-end\"\\n\\n' ) exit(1) reference_file = sys.argv[1] coverage_threshold =", "no files will be written to disk.)\\n'); # exit(1); if", "if output_prefix != \"\": if not os.path.exists(os.path.dirname(output_prefix)): os.makedirs(os.path.dirname(output_prefix)) variant_file =", "ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False, ): # Split the", "# print 'line_number: %d' % line_number; # print line; #", "0]] sorted_base_counts = sorted( list(base_counts.items()), key=operator.itemgetter(1) ) try: most_common_base_count =", "insertion_count ) > coverage_threshold: # Sanity check, just to see", "ret_num_called_bases = [0] ret_num_correct_bases = [0] ret_coverage_sum = [0] #", "# sys.stderr.write('sorted_base_counts:\\n'); # sys.stderr.write(str(sorted_base_counts) + '\\n'); # sys.stderr.write('base_counts:\\n'); # sys.stderr.write(str(base_counts)", "# ret_variant_list.append(variant_line); variant_line = ( \"undercovered1\\tpos = %s\\tref = %s\\tcoverage", "alignments_path_bam_exists = os.path.exists(alignments_path_bam) # Check if a BAM file with", "= %s\\tref = %s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr = %d\\tmost_common_insertion_count = %d\\tref_base", "(str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ### VCF", "- deletion_count - insertion_count ) if ( non_indel_coverage_next_base + deletion_count", "convert it to a sorted BAM. 
alignments_path_bam = alignments_path if", "verbose == True: sys.stdout.write(\"Reference base: %s\\n\" % (ref_base)) sys.stdout.write(\"Consensus base:", "to the deletion marking, but here we actually care about", "marks the occurance of an insertion. It is a composite", "i) + num_bases - 1 insertion_count += 1 insertion =", "sorted_insertion_counts = sorted( list(insertion_event_counts.items()), key=operator.itemgetter(1) ) most_common_insertion_count = sorted_insertion_counts[-1][1] most_common_insertion_length", "\"%s-cov_%d.variant.vcf\" % (output_prefix, coverage_threshold) fp_vcf = open(vcf_file, \"w\") fp_vcf.write(\"##fileformat=VCFv4.0\\n\") fp_vcf.write(\"##fileDate=20150409\\n\")", "most_common_deletion_count > non_indel_coverage_next_base ): # In this case, deletions are", "undercovered bases. non_indel_coverage_current_base = int(coverage) - current_base_deletion_count if verbose ==", "+ '\\n'); # most_common_base_count = 0; # Allow for the", "insertion. It is a composite object # consisting of: the", "os import sys import operator import subprocess def increase_in_dict(dict_counter, value):", "a sanity check. split_line = line.strip().split(\"\\t\") if len(split_line) < 5", "VCF output ### alt_base = ( (\"{}\") if (len(sorted_base_counts) ==", "ref-forward bases, ref-reverse, alt-forward and alt-reverse bases\">\\n' ) fp_vcf.write( '##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates", "there is going # to be a deletion event. If", "0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line)", "non-deletion, and the following bases being a deletion event. ###", "variant_line = ( \"SNP\\tpos = %s\\tref = %s\\tcoverage = %d\\tnon_indel_cov_curr", "+ \"\\n\") fp_variant.flush() if len(ret_vcf_list) > vcf_list_length and fp_vcf !=", "mnp, complex)\">\\n' ) fp_vcf.write( '##INFO=<ID=AF,Number=1,Type=Float,Description=\"Allele Frequency\">\\n' ) fp_vcf.write( '##INFO=<ID=SB,Number=1,Type=Integer,Description=\"Phred-scaled strand", "with the '*' sign, which I think # isn't relevant,", "bases that are deleted (these bases follow the current position).", ") try: most_common_base_count = sorted_base_counts[-1][1] except Exception as e: pass", "of its extension. # Also, if input file is a", "position\">\\n' ) fp_vcf.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n\") fp_vcf.flush() use_bed = False bed_chromosome = \"\"", "summary_file = prefix_for_intermediate_results + \".sum\" try: fp_sum = open(summary_file, \"r\")", "is equal to the reference. 
is_good = False for base_count", "ret_vcf_list.append(vcf_line) ################## else: sys.stderr.write( \"\\nWarning: a SNP was detected, but", "1000 info = \"DP=%s;TYPE=del\" % (coverage) ref_field = \"%s%s\" %", ") else False ) else: most_common_deletion_count = 0 most_common_deletion_length =", "+ \"/\" + os.path.splitext(os.path.basename(alignments_path))[0] + \".bam\" ) alignments_path_bam_exists = os.path.exists(alignments_path_bam)", "in the VCF format specifies the position where a deletion", "8,2*#-;)$B>2$1&D- # I chose to handle them as undercovered bases.", "are inserted; j = i + 1 while bases[j] in", "% bed_pos_start) sys.stderr.write(\"\\tEnd: %d\\n\\n\" % bed_pos_end) # i = 0;", "most_common_insertion_count) for insertion_count in sorted_insertion_counts ] ) == 1 )", "= \"DP=%s;TYPE=snp\" % (coverage) ref_field = ref_base alt_field = alt_base", "( \"ins\\tpos = %s\\tref = %s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr = %d\\tmost_common_insertion_count", "> 0: ret_snp_count[0] += 1 # ret_variant_list.append(line_number); variant_line = (", "'+', the number of the inserted bases # and the", "= ( dir_name + \"/\" + os.path.splitext(os.path.basename(alignments_path))[0] + \".bam\" )", "snps = %d, insertions = %d, deletions = %d, undercovered", "fp_variant != None: fp_variant.close() if fp_vcf != None: fp_vcf.close() summary_lines", "to \"-\", no files will be written to disk.)\\n' )", "if len(sorted_base_counts) > 0: ret_snp_count[0] += 1 # ret_variant_list.append(line_number); variant_line", "Insertions in the VCF format specifies the position where a", "== 6: qualities = split_line[5] bases = \"\" # Replace", "of a read. end_counts += 1 elif base == r\"*\":", "fp_vcf.write(\"\\n\".join(ret_vcf_list[vcf_list_length:]) + \"\\n\") fp_vcf.flush() i += num_bases_to_skip i += 1", "bases that are inserted; j = i + 1 while", "In this case, we prefer the choice which is equal", "There are pileup bases that do not have any actual", "the coverage threshold. 
# However, the total coverage also accounts", "bed_position); processes = [] if output_prefix == \"-\": output_prefix =", "ref_base alt_field = \"%s%s\" % (ref_base, sorted_insertion_counts[-1][0]) vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\"", "'##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of each allele (snp, ins, del, mnp, complex)\">\\n' )", "# sys.stderr.write(str(e) + '\\n'); # sys.stderr.write('sorted_base_counts:\\n'); # sys.stderr.write(str(sorted_base_counts) + '\\n');", "actually were any deletions (to avoid index out of bounds", "%s\\n\" % mpileup_path summary_lines += \"coverage_threshold: %d\\n\" % coverage_threshold summary_lines", "which is the same as ref, but the alt field", "= None try: fp_collect = open(collective_output_file, \"w\") except IOError: sys.stderr.write(", "[0] ret_num_correct_bases = [0] ret_coverage_sum = [0] # lines =", "most_common_deletion_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line)", "################## return most_common_deletion_length else: # In this case, either the", "However, the total coverage also accounts for the deletions denoted", "None ret_variant_list = [] ret_vcf_list = [] ret_snp_count = [0]", "summary_lines += \"num_correct_bases: %d\\n\" % ret_num_correct_bases[0] summary_lines += \"average_coverage: %.2f\\n\"", "Input file path: \"%s\".\\n' % alignments_path ) return # Convert", "> non_indel_coverage_next_base ): # In this case, deletions are a", "ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); ### Handling indel consensus.", "!= \"\": if not os.path.exists(os.path.dirname(output_prefix)): os.makedirs(os.path.dirname(output_prefix)) variant_file = \"%s-cov_%d.variant.csv\" %", "coverage = split_line[3] original_bases = split_line[4] if len(split_line) == 6:", "j]) skip_bases = (j - i) + num_bases - 1", "= output_prefix + '.conssum'; summary_file = \"%s-cov_%d.variant.sum\" % (output_prefix, coverage_threshold)", "str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ### Deletions in the VCF", "str(base_counts))) # EDIT: Previously I compared the total coverage of", "\"coverage_threshold: %d\\n\" % coverage_threshold summary_lines += \"snp_count: %d\\n\" % ret_snp_count[0]", "increase_in_dict(deletion_event_counts, deletion) # Skip the length of the numeric entry", "\"\\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\\n\" % sys.argv[0] ) sys.stderr.write(", "should contain the base which is the same as ref,", "[] ret_vcf_list = [] ret_snp_count = [0] ret_insertion_count = [0]", "1000 info = \"DP=%s;TYPE=ins\" % (coverage) ref_field = ref_base alt_field", "frequency variant).\">\\n' ) fp_vcf.write( '##INFO=<ID=HRUN,Number=1,Type=Integer,Description=\"Homopolymer length to the right of", "while bases[j] in \"0123456789\": j += 1 num_bases = int(bases[(i", "or len(split_line) > 6: sys.stderr.write(line + \"\\n\") return 0 ref_name", "non_indel_coverage_current_base = int(coverage) - current_base_deletion_count if verbose == True: sys.stdout.write(\"%s\\nbase_counts:", "most_common_base_count = 0 ### Handling base consensus. 
sorted_base_counts = sorted(", "specifies the position where a deletion occurs, with the first", "= int(line_split[1]) if current_pos < bed_pos_start or current_pos >= bed_pos_end:", "+= \"insertion_count: %d\\n\" % ret_insertion_count[0] summary_lines += \"deletion_count: %d\\n\" %", "= split_line[3] original_bases = split_line[4] if len(split_line) == 6: qualities", "continue if thread_id == 0: if (j % 1000) ==", "= sorted_deletion_counts[-1][1] most_common_deletion_length = len(sorted_deletion_counts[-1][0]) deletion_unique = ( True if", "= [0] ret_insertion_count = [0] ret_deletion_count = [0] ret_num_undercovered_bases =", "consensus), then the deletions on this base are ignored. #", "( alignments_path_bam_exists == True and os.path.getmtime(alignments_path) > os.path.getmtime(alignments_path_bam) ): #", "for ref-forward bases, ref-reverse, alt-forward and alt-reverse bases\">\\n' ) fp_vcf.write(", "or ( alignments_path_bam_exists == True and os.path.getmtime(alignments_path) > os.path.getmtime(alignments_path_bam) ):", "sign, which I think # isn't relevant, as deletions are", "any actual bases, but only the '*' symbols. How should", "len(bed_split) != 2: use_bed = False else: bed_chromosome = bed_split[0]", "# if (len(sys.argv) < 5): # sys.stderr.write('Usage:\\n'); # sys.stderr.write('\\t%s <reference_file_path>", "\"deletion_count: %d\\n\" % ret_deletion_count[0] summary_lines += \"num_undercovered_bases: %d\\n\" % ret_num_undercovered_bases[0]", "coverage_threshold: # Sanity check, just to see if there actually", "by 1, because we have i += 1 down there.", "( most_common_deletion_count > most_common_insertion_count and most_common_deletion_count > non_indel_coverage_next_base ): #", "\"undercovered1\\tpos = %s\\tref = %s\\tcoverage = %d\\tbase_counts = %s\\tinsertion_counts =", "%s\\t%s\" % ( position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_insertion_count, ref_base, temp_sorted_bc,", "fp.readlines(); fp_variant = None fp_vcf = None if output_prefix !=", "return None ret_variant_list = [] ret_vcf_list = [] ret_snp_count =", "insertion_unique = False # Sanity check, just to see if", "consensus. ### Put a different coverage threshold. Here we are", "(because we don't know where it ends). non_indel_coverage_next_base = (", "bases = \"\" # Replace the '.' and ',' signs", "our approach, we ignore this case, because we count deletions", "file \"%s\"...\\n' % reference_path) sys.stderr.write(\"Coverage threshold: %d\\n\" % coverage_threshold) summary_file", "contain the base which is the same as ref, but", "existence of the file, and the correctness of its extension.", ") ) ret_variant_list.append(variant_line) ### VCF output ### alt_base = (", "= 0 deletion_unique = False if ( most_common_insertion_count > most_common_deletion_count", "+= 1 num_bases = int(bases[(i + 1) : j]) skip_bases", "a BAM file with the given name already exists. if", "for insertion_count in sorted_insertion_counts ] ) == 1 ) else", "'##INFO=<ID=HRUN,Number=1,Type=Integer,Description=\"Homopolymer length to the right of report indel position\">\\n' )", "if (len(sys.argv) < 5): # sys.stderr.write('Usage:\\n'); # sys.stderr.write('\\t%s <reference_file_path> coverage_threshold", "bases; # print 'line_number: %d' % line_number; # print line;", "the position where a insertion occurs. 
The ref position should", "'\\n'); # sys.stderr.write('base_counts:\\n'); # sys.stderr.write(str(base_counts) + '\\n'); # sys.stderr.write('original_bases:\\n'); #", "most_common_insertion_count and most_common_deletion_count > non_indel_coverage_next_base ): # In this case,", "= bases[i] if base == r\"^\": # This is the", "of bases that need to be skipped in the string.", "== r\"^\": # This is the starting position of a", "ret_variant_list.append(variant_line); try: temp_sorted_bc = sorted_base_counts[-1][0] except: temp_sorted_bc = 0 indel_length", "%s\\tref = %s\\tcoverage = %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count = %d\\tref_base =", "= [[\"A\", 0], [\"C\", 0], [\"T\", 0], [\"G\", 0]] sorted_base_counts", "there are insertions, get the most common one. if len(list(insertion_event_counts.keys()))", "wins, or the # insertion/deletion count is ambiguous. pass return", "ret_num_undercovered_bases[0] summary_lines += \"num_called_bases: %d\\n\" % ret_num_called_bases[0] summary_lines += \"num_correct_bases:", "os.path.getmtime(alignments_path_bam) ): # Convert the SAM file to a sorted", "for sam_file in sam_files: summary_file = prefix_for_intermediate_results + \".sum\" try:", "+= \"mpileup_file: %s\\n\" % mpileup_path summary_lines += \"coverage_threshold: %d\\n\" %", "+= \"num_correct_bases: %d\\n\" % ret_num_correct_bases[0] summary_lines += \"average_coverage: %.2f\\n\" %", "deletions on this base are ignored. # if (int(coverage) <", "threshold. Here we are interested even in the reads ###", "= line.strip().split(\"\\t\") if len(split_line) < 5 or len(split_line) > 6:", ") ret_vcf_list.append(vcf_line) ################## else: sys.stderr.write( \"\\nWarning: a SNP was detected,", "was detected, but there were no bases in the sorted_base_counts!\"", "variant_line = 'deletion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); #", "%d, undercovered = %d, coverage = %.2f\" % ( i,", "== ref_base: is_good = True break if is_good == False:", "%s\\tdeletion_counts = %s\\t%s\" % ( position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_deletion_count,", "0) else (str(sorted_base_counts[-1][0])) ) qual = 1000 info = \"DP=%s;TYPE=del\"", "qualities = split_line[5] bases = \"\" # Replace the '.'", "the case where there are multiple equally good choices. #", "'deletion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); #", "# Replace the '.' and ',' signs with the actual", "+ \"\\n\") fp_vcf.flush() i += num_bases_to_skip i += 1 j", "str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) sys.stderr.write(\"\\n\") else: ret_num_correct_bases[0] += 1", "== 4000000): # print '\\nTEST\\tpos = %s\\tcoverage = %d\\tnon_indel_cov_curr =", "winner. if insertion_unique == True: # ret_insertion_count[0] += most_common_insertion_length; ret_insertion_count[0]", "count total coverage of this base, or the non_indel_coverage_current_base? sorted_base_counts", "> os.path.getmtime(alignments_path_bam) ): # Convert the SAM file to a", "In this case, deletions are a clear winner. 
if deletion_unique", "sys.stdout.write(\"Reference base: %s\\n\" % (ref_base)) sys.stdout.write(\"Consensus base: %s\\n\\n\" % (base_count[0]))", "os.path.dirname(alignments_path) if dir_name == \"\": dir_name = \".\" alignments_path_bam =", "if (use_bed == False) else max((bed_pos_start - 10), 0) j", "= open(summary_file, \"r\") lines = fp_sum.readlines() fp_sum.close() except IOError: sys.stderr.write(", "( \"SNP\\tpos = %s\\tref = %s\\tcoverage = %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count", "1 base_counts = {} insertion_count = 0 current_base_deletion_count = 0", "output ### alt_base = ( (\"{}\") if (len(sorted_base_counts) == 0)", "open file \"%s\" for reading!\\n' % summary_file ) continue fp_collect.write(\"\".join(lines)", "files will be written to disk.)\\n'); # exit(1); if len(sys.argv)", "<reponame>dschwoerer/samscripts #! /usr/bin/env python # Copyright <NAME>, 2015. www.sovic.org #", "% ( alignments_path, os.path.splitext(alignments_path_bam)[0], ) sys.stderr.write(command + \"\\n\") subprocess.call(command, shell=\"True\")", "isn't relevant, as deletions are counted prior to occuring, and", "= 0 # print 'position: %s' % position; # print", "# if (non_indel_coverage_current_base < coverage_threshold): if int(coverage) < coverage_threshold: ret_num_undercovered_bases[0]", "= 0 pass # variant_line = 'undercovered1\\tpos = %s\\tcoverage =", "the correctness of its extension. # Also, if input file", "An additional problematic case, discovered this on 03.11.2014., when analyzing", "num_bases_to_skip i += 1 j += 1 # if (i", "# if (int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count): #", "opposed to a low frequency variant).\">\\n' ) fp_vcf.write( '##INFO=<ID=HRUN,Number=1,Type=Integer,Description=\"Homopolymer length", "if output_prefix != \"\": # summary_file = output_prefix + '.conssum';", "% alignments_path) sys.stderr.write('Reference file \"%s\"...\\n' % reference_path) sys.stderr.write(\"Coverage threshold: %d\\n\"", "= 1000 info = \"DP=%s;TYPE=ins\" % (coverage) ref_field = ref_base", "%s\\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts),", "temp_sorted_bc = sorted_base_counts[-1][0] except: temp_sorted_bc = 0 indel_length = most_common_insertion_length", "ref_field, alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ################## return most_common_deletion_length else:", "= %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s' % (position, int(coverage), non_indel_coverage_current_base,", "< bed_pos_start or current_pos >= bed_pos_end: i += 1 j", "#! /usr/bin/env python # Copyright <NAME>, 2015. www.sovic.org # #", "0], [\"G\", 0]] sorted_base_counts = sorted( list(base_counts.items()), key=operator.itemgetter(1) ) try:", "'^' marking the read start and a char marking the", "split_line[1] ref_base = split_line[2] coverage = split_line[3] original_bases = split_line[4]", "< len(bases): base = bases[i] if base == r\"^\": #", "input file is a SAM file, then convert it to", "fp_vcf.write( '##INFO=<ID=AF,Number=1,Type=Float,Description=\"Allele Frequency\">\\n' ) fp_vcf.write( '##INFO=<ID=SB,Number=1,Type=Integer,Description=\"Phred-scaled strand bias at this", "shell=\"True\") # Create the BAM index file. 
command = \"samtools", "+ '\\n'); # sys.stderr.write('line:\\n'); # sys.stderr.write(line.strip() + '\\n'); # most_common_base_count", "(i < bed_pos_end): # len(lines)): num_bases_to_skip = 0 for line", "is_good = False for base_count in sorted_base_counts: if base_count[1] ==", "Replace the '.' and ',' signs with the actual reference", "os.path.exists(mpileup_path) if mpileup_exists == False or ( mpileup_exists == True", "# consisting of: the special character '-', the number of", "sorted_deletion_counts[-1][1] most_common_deletion_length = len(sorted_deletion_counts[-1][0]) deletion_unique = ( True if (", "a insertion occurs. The ref position should contain the base", "= %d\\tnon_indel_cov_curr = %d\\tmost_common_insertion_count = %d\\tref_base = %s\\tcons_base = %s\\tbase_counts", "alignments_path if os.path.exists(alignments_path) == False: sys.stderr.write('ERROR: File \"%s\" does not", "position\">\\n' ) fp_vcf.write( '##INFO=<ID=DP4,Number=4,Type=Integer,Description=\"Counts for ref-forward bases, ref-reverse, alt-forward and", "i += num_bases_to_skip i += 1 j += 1 #", "= ref_base alt_field = \"%s%s\" % (ref_base, sorted_insertion_counts[-1][0]) vcf_line =", "r\"+\": # This marks the occurance of an insertion. It", "actually care about the bases, # and we need to", "if (output_prefix != '-'): # CollectSummaries([sam_file], output_prefix, output_prefix + '.variant.sum');", "insertion_count in sorted_insertion_counts ] ) == 1 ) else False", "two # symbols: '^' marking the read start and a", "ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False, ): # Split", "down there. elif base == r\"$\": # This marks the", "non_indel_coverage_next_base = ( int(coverage) - end_counts - deletion_count - insertion_count", "because of a deletion # consensus), then the deletions on", "if verbose == True: sys.stdout.write(\"Reference base: %s\\n\" % (ref_base)) sys.stdout.write(\"Consensus", "False bed_chromosome = \"\" bed_pos_start = 0 # bed_pos_end =", "position, ref_field, alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ################## else: sys.stderr.write(", "up at this base (i.e. this base didn't get skipped", "file \"%s\"...\\n' % alignments_path) sys.stderr.write('Reference file \"%s\"...\\n' % reference_path) sys.stderr.write(\"Coverage", "this case, either the base count consensus wins, or the", "%d\\n\\n\" % bed_pos_end) # i = 0; i = 0", "+= 1 j += 1 # if (i > 10000):", "a pileup from a given SAM/BAM file, and calls consensus", "not open file \"%s\" for writing!\\n' % collective_output_file ) return", "(base_count[0])) # if (int(position) == 100000 or int(position) == 1000000", "fp_collect.write(\"\".join(lines) + \"\\n\") fp_collect.close() if __name__ == \"__main__\": # if", "insertions are a clear winner. if insertion_unique == True: #", "most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); ### Handling indel", "1 continue if thread_id == 0: if (j % 1000)", "the bases, # and we need to make an allele", "new BAM file will be generated. dir_name = os.path.dirname(alignments_path) if", "the end of a read. end_counts += 1 elif base", "# This is a deletion, just count it. 
current_base_deletion_count +=", "ret_variant_list.append(variant_line) ### VCF output ### qual = 1000 info =", "0 ref_name = split_line[0] position = split_line[1] ref_base = split_line[2]", "sorted_deletion_counts ] ) == 1 ) else False ) else:", "else max((bed_pos_start - 10), 0) j = 0 # while", "if a BAM file with the given name already exists.", "= ref_base alt_field = alt_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % (", "int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, (\"{}\") if (len(sorted_base_counts) == 0) else", "\"\": dir_name = \".\" alignments_path_bam = ( dir_name + \"/\"", "== \".\" or original_bases[i] == \",\": bases += ref_base else:", "already decided if there is going # to be a", "str(deletion_event_counts), ) ) ret_variant_list.append(variant_line) ### VCF output ### qual =", "decided if there is going # to be a deletion", "SAM file, then convert it to a sorted BAM. alignments_path_bam", "list(insertion_event_counts.items()), key=operator.itemgetter(1) ) most_common_insertion_count = sorted_insertion_counts[-1][1] most_common_insertion_length = len(sorted_insertion_counts[-1][0]) insertion_unique", "value): try: dict_counter[value] += 1 except: dict_counter[value] = 1 def", "line_number; # print line; # print ''; # sys.stdout.flush(); i", "print 'bases: \"%s\"' % bases; # print 'line_number: %d' %", "num_bases = int(bases[(i + 1) : j]) skip_bases = (j", "'\\tPosition parameter is a string specifying \"chromosome:start-end\"\\n\\n' ) exit(1) reference_file", "% (coverage) ref_field = \"%s%s\" % (ref_base, sorted_deletion_counts[-1][0]) alt_field =", "ret_coverage_sum[0] += int(coverage) # TODO: Should I count total coverage", "True break if is_good == False: if len(sorted_base_counts) > 0:", "try: fp_collect = open(collective_output_file, \"w\") except IOError: sys.stderr.write( 'ERROR: Could", "= False for base_count in sorted_base_counts: if base_count[1] == most_common_base_count:", "== 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) )", "as ref, but the alt field contains the ref base", "ref_base alt_field = \"N\" vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name,", ") return for sam_file in sam_files: summary_file = prefix_for_intermediate_results +", "### which had a '*' at the current position (because", "bed_position = sys.argv[5] # sys.stderr.write('bed_position: \"%s\"\\n\\n' % bed_position); processes =", "that need to be skipped. i += skip_bases elif base", "0 insertion_unique = False # Sanity check, just to see", "= (j - i) + num_bases - 1 deletion_count +=", "or int(position) == 2000000 or int(position) == 3000000 or int(position)", "- 1 deletion_count += 1 deletion = bases[j : (j", "] ) == 1 ) else False ) else: most_common_insertion_count", "collective_output_file ) return for sam_file in sam_files: summary_file = prefix_for_intermediate_results", "10), 0) j = 0 # while (i < bed_pos_end):", "the ref base + the insertion event. 
### VCF output", "case, because we count deletions one by one # through", "object # consisting of: the special character '+', the number", "sys.stderr.write(line + \"\\n\") return 0 ref_name = split_line[0] position =", "len(bases): base = bases[i] if base == r\"^\": # This", "most_common_deletion_count) for deletion_count in sorted_deletion_counts ] ) == 1 )", "10000): # break; fp.close() sys.stderr.write(\"\\n\") if fp_variant != None: fp_variant.close()", "Could not open file \"%s\" for writing!\\n' % collective_output_file )", "\"w\") fp_sum.write(summary_lines) fp_sum.close() return summary_file except IOError: sys.stderr.write( 'ERROR: Could", "the choice which is equal to the reference. is_good =", "# Skip the length of the numeric entry plus the", "of the current base with the coverage threshold. # However,", "sam_file = sys.argv[4] bed_position = \"\" if len(sys.argv) > 5:", "(str(sorted_base_counts[-1][0])) ) qual = 1000 info = \"DP=%s;TYPE=snp\" % (coverage)", "< 5 or len(split_line) > 6: sys.stderr.write(line + \"\\n\") return", "check, just to see if there actually were any deletions", "exists. if alignments_path_bam_exists == False or ( alignments_path_bam_exists == True", "%s %s.bai\" % ( alignments_path_bam, alignments_path_bam, ) subprocess.call(command, shell=\"True\") elif", "the current position (because we don't know where it ends).", "!= None: fp_variant.write(\"\\n\".join(ret_variant_list[variant_list_length:]) + \"\\n\") fp_variant.flush() if len(ret_vcf_list) > vcf_list_length", "split_line = line.strip().split(\"\\t\") if len(split_line) < 5 or len(split_line) >", "and at that point is already decided if there is", "(most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); # return most_common_deletion_length;", "the non_indel_coverage_current_base? sorted_base_counts = [[\"A\", 0], [\"C\", 0], [\"T\", 0],", "% ret_deletion_count[0] summary_lines += \"num_undercovered_bases: %d\\n\" % ret_num_undercovered_bases[0] summary_lines +=", "-d 1000000 -Q 0 -A -f %s %s > %s.mpileup\"", "them as undercovered bases. non_indel_coverage_current_base = int(coverage) - current_base_deletion_count if", "occurance of an insertion. It is a composite object #", "sum( [ int(insertion_count[1] == most_common_insertion_count) for insertion_count in sorted_insertion_counts ]", "mpileup_path, coverage_threshold, output_prefix, thread_id=0, bed_position=\"\", ): fp = None try:", "else (str(sorted_base_counts[-1][0])) ) qual = 1000 info = \"DP=%s;TYPE=ins\" %", "%s\\tcons_base = %s\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s\\n' %", "dict_counter[value] += 1 except: dict_counter[value] = 1 def process_mpileup_line( line,", "= bases[j : (j + num_bases)].upper() increase_in_dict(deletion_event_counts, deletion) # Skip", "but there were no bases in the sorted_base_counts!\" ) variant_line", "\"%s%s\" % (ref_base, sorted_insertion_counts[-1][0]) vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name,", "exist!\\n' % alignments_path) return if alignments_path.endswith(\"sam\"): # Determine the path", "the VCF format specifies the position where a insertion occurs.", "line.strip(), ) ) ret_variant_list.append(variant_line) ### Deletions in the VCF format", "insertion occurs. 
The ref position should contain the base which", "+= skip_bases else: increase_in_dict(base_counts, bases[i].upper()) i += 1 # TODO:", "ret_insertion_count[0] summary_lines += \"deletion_count: %d\\n\" % ret_deletion_count[0] summary_lines += \"num_undercovered_bases:", "alignments_path_bam_exists == False or ( alignments_path_bam_exists == True and os.path.getmtime(alignments_path)", "[\"T\", 0], [\"G\", 0]] sorted_base_counts = sorted( list(base_counts.items()), key=operator.itemgetter(1) )", "sys.stderr.write('bed_position: \"%s\"\\n\\n' % bed_position); processes = [] if output_prefix ==", "shell=\"True\") elif alignments_path.endswith(\"bam\") == False: sys.stderr.write( 'ERROR: File extension needs", "if is_good == False: if len(sorted_base_counts) > 0: ret_snp_count[0] +=", "%d\\n\" % coverage_threshold summary_lines += \"snp_count: %d\\n\" % ret_snp_count[0] summary_lines", "interested even in the reads ### which had a '*'", "strand bias at this position\">\\n' ) fp_vcf.write( '##INFO=<ID=DP4,Number=4,Type=Integer,Description=\"Counts for ref-forward", "are counted prior to occuring, and at that point is", "= bases[j : (j + num_bases)].upper() increase_in_dict(insertion_event_counts, insertion) i +=", "file. command = \"samtools view -bS %s | samtools sort", "= [0] ret_num_called_bases = [0] ret_num_correct_bases = [0] ret_coverage_sum =", "if thread_id == 0: if (j % 1000) == 0:", "(ref_base, sorted_deletion_counts[-1][0]) alt_field = ref_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % (", "extension needs to be either .sam or .bam! Input file", "(coverage) ref_field = ref_base alt_field = \"N\" vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\"", "reference_path, mpileup_path, coverage_threshold, output_prefix, thread_id=0, bed_position=\"\", ): fp = None", "bases, # and we need to make an allele aware", "just to see if there actually were any insertions (to", "# sys.stderr.write('line:\\n'); # sys.stderr.write(line.strip() + '\\n'); # most_common_base_count = 0;", "== r\"$\": # This marks the end of a read.", "i = 0; i = 0 if (use_bed == False)", "Deletions in the VCF format specifies the position where a", "consisting of: the special character '-', the number of the", "= %s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr = %d\\tmost_common_deletion_count = %d\\tref_base = %s\\tcons_base", "the # mapping quality of the read. # increase_in_dict(base_counts, bases[i", "deletion_count - insertion_count ) if ( non_indel_coverage_next_base + deletion_count +", "%d\\n\" % coverage_threshold) summary_file = process_mpileup( alignments_path, reference_path, (\"%s.mpileup\" %", "\".join(sys.argv))) fp_vcf.write(\"##reference=%s\\n\" % reference_path) fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw Depth\">\\n') fp_vcf.write( '##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of each", "How should this be handled properly? # Example line from", "reference_path) fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw Depth\">\\n') fp_vcf.write( '##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of each allele (snp, ins,", "alignments_path ) return # Convert the sorted BAM file to", "# In this case, deletions are a clear winner. 
if", "+= original_bases[i] i += 1 base_counts = {} insertion_count =", "inserted bases # and the actual bases that are inserted", "int(position) == 4000000): # print '\\nTEST\\tpos = %s\\tcoverage = %d\\tnon_indel_cov_curr", "to disk.)\\n'); # exit(1); if len(sys.argv) < 5: sys.stderr.write(\"Usage:\\n\") sys.stderr.write(", "one. if len(list(deletion_event_counts.keys())) > 0: sorted_deletion_counts = sorted( list(deletion_event_counts.items()), key=operator.itemgetter(1)", "str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); variant_line = ( \"undercovered1\\tpos", "of: the special character '-', the number of the deleted", "disk.)\\n' ) sys.stderr.write( '\\tPosition parameter is a string specifying \"chromosome:start-end\"\\n\\n'", "True: # ret_deletion_count[0] += most_common_deletion_length; ret_deletion_count[0] += 1 # variant_line", "else: bed_pos_start = int(bed_pos_split[0]) bed_pos_end = int(bed_pos_split[1]) use_bed = True", "= \"%s%s\" % (ref_base, sorted_insertion_counts[-1][0]) vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % (", "info, ) ret_vcf_list.append(vcf_line) ################## return most_common_deletion_length else: # In this", "= open(mpileup_path, \"r\") except IOError: sys.stderr.write( 'ERROR: Could not open", "vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position, ref_field, alt_field, qual,", "if num_bases_to_skip > 0: num_bases_to_skip -= 1 continue if use_bed", "that the variant is an INDEL.\">\\n' ) fp_vcf.write( '##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description=\"Indicates that", "handle them as undercovered bases. non_indel_coverage_current_base = int(coverage) - current_base_deletion_count", "exist yet. mpileup_path = \"%s.mpileup\" % alignments_path_bam mpileup_exists = os.path.exists(mpileup_path)", "+= 0; ret_coverage_sum[0] += int(coverage) # TODO: Should I count", "# ret_deletion_count[0] += most_common_deletion_length; ret_deletion_count[0] += 1 # variant_line =", "fp_vcf.write(\"##fileformat=VCFv4.0\\n\") fp_vcf.write(\"##fileDate=20150409\\n\") fp_vcf.write(\"##source=%s\\n\" % (\" \".join(sys.argv))) fp_vcf.write(\"##reference=%s\\n\" % reference_path) fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw", "sys.stderr.write('ERROR: File \"%s\" does not exist!\\n' % alignments_path) return if", "0 # print 'position: %s' % position; # print 'bases:", "number of bases # that need to be skipped. i", "just count it. current_base_deletion_count += 1 elif base == r\"-\":", "position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, (\"{}\") if (len(sorted_base_counts) ==", "occurance of deletions. 
It is a composite object # consisting", "bed_position = \"\" if len(sys.argv) > 5: bed_position = sys.argv[5]", "str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ### Deletions in", "[0] # lines = fp.readlines(); fp_variant = None fp_vcf =", ") sys.stderr.write(\"\\n\") else: ret_num_correct_bases[0] += 1 if verbose == True:", "'##INFO=<ID=SB,Number=1,Type=Integer,Description=\"Phred-scaled strand bias at this position\">\\n' ) fp_vcf.write( '##INFO=<ID=DP4,Number=4,Type=Integer,Description=\"Counts for", "True if ( sum( [ int(insertion_count[1] == most_common_insertion_count) for insertion_count", "else False ) else: most_common_deletion_count = 0 most_common_deletion_length = 0", "be written to disk.)\\n'); # exit(1); if len(sys.argv) < 5:", "int(bases[(i + 1) : j]) skip_bases = (j - i)", "number of the deleted bases # and the actual bases", "format specifies the position where a insertion occurs. The ref", "+= most_common_deletion_length; ret_deletion_count[0] += 1 # variant_line = 'deletion\\t%d\\t%s\\t%s\\t%s\\t%s' %", "bed_pos_start = 0 # bed_pos_end = len(lines); bed_pos_end = -1", "line.strip()); # ret_variant_list.append(variant_line); # return most_common_deletion_length; variant_line = ( \"del\\tpos", "thread_id=0, bed_position=\"\", ): # Sanity checking the existence of the", "marking, but here we actually care about the bases, #", "ret_variant_list.append(variant_line); # return most_common_deletion_length; variant_line = ( \"del\\tpos = %s\\tref", "location specified through commandline:\\n\") sys.stderr.write('\\tChromosome: \"%s\"\\n' % bed_chromosome) sys.stderr.write(\"\\tStart: %d\\n\"", "\"ins\\tpos = %s\\tref = %s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr = %d\\tmost_common_insertion_count =", "# There are pileup bases that do not have any", "denoted with the '*' sign, which I think # isn't", "no bases in the sorted_base_counts!\" ) variant_line = ( \"SNP\\tpos", "( dir_name + \"/\" + os.path.splitext(os.path.basename(alignments_path))[0] + \".bam\" ) alignments_path_bam_exists", "file is a SAM file, then convert it to a", "% (output_prefix, coverage_threshold) fp_variant = open(variant_file, \"w\") vcf_file = \"%s-cov_%d.variant.vcf\"", "or ( mpileup_exists == True and os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path) ):", "= alignments_path if os.path.exists(alignments_path) == False: sys.stderr.write('ERROR: File \"%s\" does", "mpileup -B -d 1000000 -Q 0 -A -f %s %s", "composite object # consisting of: the special character '-', the", "= sys.argv[4] bed_position = \"\" if len(sys.argv) > 5: bed_position", "not have any actual bases, but only the '*' symbols.", "%d\\tnon_indel_cov_curr = %d\\tmost_common_deletion_count = %d\\tref_base = %s\\tcons_base = %s\\tbase_counts =", "the '*' symbols. How should this be handled properly? #", "< 5): # sys.stderr.write('Usage:\\n'); # sys.stderr.write('\\t%s <reference_file_path> coverage_threshold <collective_output_file> <{sb}am_file_1>", "str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ### VCF output", "os.path.getmtime(mpileup_path) ): command = \"samtools mpileup -B -d 1000000 -Q", "int(coverage) == current_base_deletion_count): # if (non_indel_coverage_current_base < coverage_threshold): if int(coverage)", "perform a sanity check. 
split_line = line.strip().split(\"\\t\") if len(split_line) <", "except IOError: sys.stderr.write( 'ERROR: Could not open file \"%s\" for", "allele aware count. # Get the number of bases that", "Handling indel consensus. ### Put a different coverage threshold. Here", "1 elif base == r\"-\": # This marks the occurance", "= sys.argv[3] sam_file = sys.argv[4] bed_position = \"\" if len(sys.argv)", ") sys.stderr.write(summary_lines + \"\\n\") sys.stderr.write(\"\\n\") if output_prefix != \"\": #", "fp = open(mpileup_path, \"r\") except IOError: sys.stderr.write( 'ERROR: Could not", "- insertion_count ) if ( non_indel_coverage_next_base + deletion_count + insertion_count", "%.2f\\n\" % ( (float(ret_coverage_sum[0]) / float((i + 1))) ) sys.stderr.write(summary_lines", "open file \"%s\" for writing!\\n' % collective_output_file ) return for", "except Exception as e: pass # sys.stderr.write(str(e) + '\\n'); #", "if len(sys.argv) > 5: bed_position = sys.argv[5] # sys.stderr.write('bed_position: \"%s\"\\n\\n'", "ref, but the alt field contains the ref base +", "use_bed = False else: bed_chromosome = bed_split[0] bed_pos_split = bed_split[1].split(\"-\")", "to occuring, and at that point is already decided if", "index out of bounds error). # If there are deletions,", "position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_insertion_count, ref_base, temp_sorted_bc, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts),", "0 indel_length = most_common_insertion_length variant_line = ( \"ins\\tpos = %s\\tref", "this base (i.e. this base didn't get skipped because of", "insertions (to avoid index out of bounds error). # If", "the line, and perform a sanity check. split_line = line.strip().split(\"\\t\")", "there were no bases in the sorted_base_counts!\" ) variant_line =", "line_number, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum,", "mpileup_exists = os.path.exists(mpileup_path) if mpileup_exists == False or ( mpileup_exists", "try: most_common_base_count = sorted_base_counts[-1][1] except Exception as e: pass #", "fp = None try: fp = open(mpileup_path, \"r\") except IOError:", "= \"\" # Replace the '.' and ',' signs with", "the special character '-', the number of the deleted bases", "= ( int(coverage) - end_counts - deletion_count - insertion_count )", "% bed_chromosome) sys.stderr.write(\"\\tStart: %d\\n\" % bed_pos_start) sys.stderr.write(\"\\tEnd: %d\\n\\n\" % bed_pos_end)", "the number of the deleted bases # and the actual", "didn't get skipped because of a deletion # consensus), then", "thread_id=0, bed_position=\"\", ): fp = None try: fp = open(mpileup_path,", "1 deletion = bases[j : (j + num_bases)].upper() increase_in_dict(deletion_event_counts, deletion)", "to \"-\", no files will be written to disk.)\\n'); #", "deletion, just count it. current_base_deletion_count += 1 elif base ==", "insertion event. ### VCF output ### alt_base = ( (\"{}\")", "5: sys.stderr.write(\"Usage:\\n\") sys.stderr.write( \"\\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\\n\" %", "False else: bed_chromosome = bed_split[0] bed_pos_split = bed_split[1].split(\"-\") if len(bed_pos_split)", "except: temp_sorted_bc = 0 indel_length = most_common_insertion_length variant_line = (", "at the current position (because we don't know where it", "properly? 
# Example line from the mpileup file: # gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome", "coverage_threshold, verbose=False, ): # Split the line, and perform a", "i += 1 # Increase only by 1, because we", "It is a composite object # consisting of: the special", "verbose == True: sys.stdout.write(\"%s\\nbase_counts: %s\\n\" % (line.strip(), str(base_counts))) # EDIT:", "4000000): # print '\\nTEST\\tpos = %s\\tcoverage = %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count", "ret_deletion_count[0], ret_num_undercovered_bases[0], (float(ret_coverage_sum[0]) / float((i + 1))), ) ) sys.stderr.flush()", "index out of bounds error). # If there are insertions,", "index %s %s.bai\" % ( alignments_path_bam, alignments_path_bam, ) subprocess.call(command, shell=\"True\")", ") fp_vcf.write( '##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description=\"Indicates that the variant is a consensus variant", "if input file is a SAM file, then convert it", "use_bed = False else: bed_pos_start = int(bed_pos_split[0]) bed_pos_end = int(bed_pos_split[1])", "increase_in_dict(base_counts, bases[i + 1].upper()); i += 1 # Increase only", "== 100000 or int(position) == 1000000 or int(position) == 2000000", "on 03.11.2014., when analyzing BWA-MEM's mpileup. # There are pileup", "good choices. # In this case, we prefer the choice", "base. i = 0 while i < len(original_bases): if original_bases[i]", "position (because we don't know where it ends). non_indel_coverage_next_base =", ") if len(ret_variant_list) > variant_list_length and fp_variant != None: fp_variant.write(\"\\n\".join(ret_variant_list[variant_list_length:])", "print line_split[0]; # print bed_chromosome; i += 1 j +=", "# Also, if input file is a SAM file, then", "qual, info, ) ret_vcf_list.append(vcf_line) ################## return most_common_deletion_length else: # In", "\"\\n\") fp_collect.close() if __name__ == \"__main__\": # if (len(sys.argv) <", "print ''; # sys.stdout.flush(); i = 0 while i <", "\".bam\" ) alignments_path_bam_exists = os.path.exists(alignments_path_bam) # Check if a BAM", "bases. non_indel_coverage_current_base = int(coverage) - current_base_deletion_count if verbose == True:", "+= 1 continue else: # print line_split[0]; # print bed_chromosome;", "<NAME>, 2015. 
www.sovic.org # # Creates a pileup from a", "ret_num_undercovered_bases = [0] ret_num_called_bases = [0] ret_num_correct_bases = [0] ret_coverage_sum", "1 continue else: # print line_split[0]; # print bed_chromosome; i", ") sys.stderr.flush() variant_list_length = len(ret_variant_list) vcf_list_length = len(ret_vcf_list) num_bases_to_skip =", "(\" \".join(sys.argv))) fp_vcf.write(\"##reference=%s\\n\" % reference_path) fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw Depth\">\\n') fp_vcf.write( '##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of", "!= 2: use_bed = False else: bed_pos_start = int(bed_pos_split[0]) bed_pos_end", "= %s\\t%s\\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts),", "= sorted_base_counts[-1][0] except: temp_sorted_bc = 0 indel_length = most_common_insertion_length variant_line", "sorted BAM file to a mpileup file if it doesn't", "had a '*' at the current position (because we don't", "= %s\\tcoverage = %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count = %d\\tref_base = %s\\tcons_base", "most_common_base_count = sorted_base_counts[-1][1] except Exception as e: pass # sys.stderr.write(str(e)", "it. current_base_deletion_count += 1 elif base == r\"-\": # This", "mpileup file: # gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D- #", "%s\\tinsertion_counts = %s\\tdeletion_counts = %s\" % ( position, ref_name, int(coverage),", "= open(collective_output_file, \"w\") except IOError: sys.stderr.write( 'ERROR: Could not open", "Get the number of bases that need to be skipped", "+ deletion_count + insertion_count ) > coverage_threshold: # Sanity check,", "= None fp_vcf = None if output_prefix != \"\": if", "reading!\\n' % mpileup_path ) return None ret_variant_list = [] ret_vcf_list", "!= None: fp_variant.close() if fp_vcf != None: fp_vcf.close() summary_lines =", "base: %s\\n\" % (ref_base)) sys.stdout.write(\"Consensus base: %s\\n\\n\" % (base_count[0])) #", "where there are multiple equally good choices. 
# In this", "+ \".bam\" ) alignments_path_bam_exists = os.path.exists(alignments_path_bam) # Check if a", "lines = fp.readlines(); fp_variant = None fp_vcf = None if", "base == r\"^\": # This is the starting position of", "accounts for the deletions denoted with the '*' sign, which", "else: bed_chromosome = bed_split[0] bed_pos_split = bed_split[1].split(\"-\") if len(bed_pos_split) !=", "bed_split[0] bed_pos_split = bed_split[1].split(\"-\") if len(bed_pos_split) != 2: use_bed =", "True: # ret_insertion_count[0] += most_common_insertion_length; ret_insertion_count[0] += 1 ret_num_called_bases[0] +=", "if output_prefix == \"-\": output_prefix = os.path.splitext(sam_file)[0] main(sam_file, reference_file, coverage_threshold,", "<{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\\n' % sys.argv[0]); # sys.stderr.write('\\t(If <collective_output_file> is", "= prefix_for_intermediate_results + \".sum\" try: fp_sum = open(summary_file, \"r\") lines", "alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ################## elif ( most_common_deletion_count >", "ref_base, temp_sorted_bc, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ###", "% (ref_base, sorted_deletion_counts[-1][0]) alt_field = ref_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" %", "line, line_number, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases,", "consensus. sorted_base_counts = sorted( list(base_counts.items()), key=operator.itemgetter(1) ) try: most_common_base_count =", "actual bases, but only the '*' symbols. How should this", "[0] ret_coverage_sum = [0] # lines = fp.readlines(); fp_variant =", "(most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); try: temp_sorted_bc =", "% sys.argv[0]); # sys.stderr.write('\\t(If <collective_output_file> is equal to \"-\", no", "insertions, get the most common one. if len(list(insertion_event_counts.keys())) > 0:", "1000000 or int(position) == 2000000 or int(position) == 3000000 or", "open(mpileup_path, \"r\") except IOError: sys.stderr.write( 'ERROR: Could not open file", "int(coverage) # TODO: Should I count total coverage of this", "+ '\\n'); # sys.stderr.write('original_bases:\\n'); # sys.stderr.write(str(original_bases) + '\\n'); # sys.stderr.write('line:\\n');", "'*' character. # Get the number of bases that need", "+= 1 # TODO: An additional problematic case, discovered this", "sys.stderr.write('Reference file \"%s\"...\\n' % reference_path) sys.stderr.write(\"Coverage threshold: %d\\n\" % coverage_threshold)", "non_indel_coverage_current_base, most_common_deletion_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) )", "we wound up at this base (i.e. this base didn't", "will be generated. 
dir_name = os.path.dirname(alignments_path) if dir_name == \"\":", "is a composite object # consisting of: the special character", "os.path.exists(alignments_path) == False: sys.stderr.write('ERROR: File \"%s\" does not exist!\\n' %", "1000 info = \"DP=%s;TYPE=snp\" % (coverage) ref_field = ref_base alt_field", "care about the bases, # and we need to make", "ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False,", "\"DP=%s;TYPE=snp\" % (coverage) ref_field = ref_base alt_field = alt_base vcf_line", "for line in fp: # line = lines[i]; if num_bases_to_skip", "+= ref_base else: bases += original_bases[i] i += 1 base_counts", ") alignments_path_bam_exists = os.path.exists(alignments_path_bam) # Check if a BAM file", "an insertion. It is a composite object # consisting of:", "= 0 while i < len(original_bases): if original_bases[i] == \".\"", "view -bS %s | samtools sort - %s\" % (", "and the actual bases that are deleted (these bases follow", "ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ###", "bases follow the current position). # In our approach, we", "# Get the number of bases that need to be", "the total coverage of the current base with the coverage", "object # consisting of: the special character '-', the number", "deletions are counted prior to occuring, and at that point", "0: sorted_deletion_counts = sorted( list(deletion_event_counts.items()), key=operator.itemgetter(1) ) most_common_deletion_count = sorted_deletion_counts[-1][1]", "Should I count total coverage of this base, or the", "len(sys.argv) > 5: bed_position = sys.argv[5] # sys.stderr.write('bed_position: \"%s\"\\n\\n' %", "if base_count[1] == most_common_base_count: if base_count[0] == ref_base: is_good =", "bed_pos_end = len(lines); bed_pos_end = -1 if bed_position != \"\":", "[0] ret_num_undercovered_bases = [0] ret_num_called_bases = [0] ret_num_correct_bases = [0]", "variant_list_length and fp_variant != None: fp_variant.write(\"\\n\".join(ret_variant_list[variant_list_length:]) + \"\\n\") fp_variant.flush() if", "sorted BAM. 
alignments_path_bam = alignments_path if os.path.exists(alignments_path) == False: sys.stderr.write('ERROR:", "a composite object # consisting of: the special character '+',", "%s %s > %s.mpileup\" % ( reference_path, alignments_path_bam, alignments_path_bam, )", "actual bases that are deleted (these bases follow the current", "< coverage_threshold): if int(coverage) < coverage_threshold: ret_num_undercovered_bases[0] += 1 #", "+ '\\n'); # sys.stderr.write('sorted_base_counts:\\n'); # sys.stderr.write(str(sorted_base_counts) + '\\n'); # sys.stderr.write('base_counts:\\n');", "+ \".sum\" try: fp_sum = open(summary_file, \"r\") lines = fp_sum.readlines()", "for reading!\\n' % summary_file ) continue fp_collect.write(\"\".join(lines) + \"\\n\") fp_collect.close()", "= line.strip().split(\"\\t\") if len(line_split) > 2 and line_split[0] == bed_chromosome:", "str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ### Insertions in the", "bed_position=\"\", ): # Sanity checking the existence of the file,", "= sys.argv[5] # sys.stderr.write('bed_position: \"%s\"\\n\\n' % bed_position); processes = []", "): fp_collect = None try: fp_collect = open(collective_output_file, \"w\") except", "= len(ret_vcf_list) num_bases_to_skip = process_mpileup_line( line, i, ret_variant_list, ret_vcf_list, ret_snp_count,", "= %d, undercovered = %d, coverage = %.2f\" % (", "import os import sys import operator import subprocess def increase_in_dict(dict_counter,", "ref_base else: bases += original_bases[i] i += 1 base_counts =", "main(sam_file, reference_file, coverage_threshold, output_prefix, 0, bed_position) # if (output_prefix !=", "bias at this position\">\\n' ) fp_vcf.write( '##INFO=<ID=DP4,Number=4,Type=Integer,Description=\"Counts for ref-forward bases,", "the '*' character. # Get the number of bases that", "relevant, as deletions are counted prior to occuring, and at", "of this base, or the non_indel_coverage_current_base? sorted_base_counts = [[\"A\", 0],", "info = \"DP=%s;TYPE=ins\" % (coverage) ref_field = ref_base alt_field =", "else: bases += original_bases[i] i += 1 base_counts = {}", "summary_lines += \"average_coverage: %.2f\\n\" % ( (float(ret_coverage_sum[0]) / float((i +", "[] if output_prefix == \"-\": output_prefix = os.path.splitext(sam_file)[0] main(sam_file, reference_file,", "1 ) else False ) else: most_common_insertion_count = 0 most_common_insertion_length", "# print 'bases: \"%s\"' % bases; # print 'line_number: %d'", "# break; fp.close() sys.stderr.write(\"\\n\") if fp_variant != None: fp_variant.close() if", "that are inserted; j = i + 1 while bases[j]", "dir_name == \"\": dir_name = \".\" alignments_path_bam = ( dir_name", "bed_pos_end = -1 if bed_position != \"\": bed_split = bed_position.split(\":\")", "= {} end_counts = 0 # print 'position: %s' %", "# However, the total coverage also accounts for the deletions", "%d\\tref_base = %s\\tcons_base = %s\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts =", "# print line_split[0]; # print bed_chromosome; i += 1 j", "# variant_line = 'deletion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());", "on this base are ignored. 
# if (int(coverage) < coverage_threshold", "( position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_insertion_count, ref_base, temp_sorted_bc, str(sorted_base_counts), str(insertion_event_counts),", "2 and line_split[0] == bed_chromosome: current_pos = int(line_split[1]) if current_pos", "# bed_pos_end = len(lines); bed_pos_end = -1 if bed_position !=", "len(lines)): num_bases_to_skip = 0 for line in fp: # line", "get the most common one. if len(list(deletion_event_counts.keys())) > 0: sorted_deletion_counts", "1))) ) sys.stderr.write(summary_lines + \"\\n\") sys.stderr.write(\"\\n\") if output_prefix != \"\":", "1 # ret_variant_list.append(line_number); variant_line = ( \"SNP\\tpos = %s\\tref =", "case, insertions are a clear winner. if insertion_unique == True:", "VCF format specifies the position where a insertion occurs. The", "else (str(sorted_base_counts[-1][0])) ) qual = 1000 info = \"DP=%s;TYPE=del\" %", "through the '*' character. # Get the number of bases", "-Q 0 -A -f %s %s > %s.mpileup\" % (", "int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); ###", "- %s\" % ( alignments_path, os.path.splitext(alignments_path_bam)[0], ) sys.stderr.write(command + \"\\n\")", "the starting position of a read. It encodes two #", "position). # Similar to the deletion marking, but here we", "skipped. i += skip_bases elif base == r\"+\": # This", "fp_variant != None: fp_variant.write(\"\\n\".join(ret_variant_list[variant_list_length:]) + \"\\n\") fp_variant.flush() if len(ret_vcf_list) >", "%s\\n\" % alignments_path summary_lines += \"mpileup_file: %s\\n\" % mpileup_path summary_lines", "This is the starting position of a read. It encodes", "else: # In this case, either the base count consensus", "commandline:\\n\") sys.stderr.write('\\tChromosome: \"%s\"\\n' % bed_chromosome) sys.stderr.write(\"\\tStart: %d\\n\" % bed_pos_start) sys.stderr.write(\"\\tEnd:", "alignments_path summary_lines += \"mpileup_file: %s\\n\" % mpileup_path summary_lines += \"coverage_threshold:", "Convert the sorted BAM file to a mpileup file if", "the sorted_base_counts!\" ) variant_line = ( \"SNP\\tpos = %s\\tref =", "== 1 ) else False ) else: most_common_deletion_count = 0", "sorted_base_counts = sorted( list(base_counts.items()), key=operator.itemgetter(1) ) try: most_common_base_count = sorted_base_counts[-1][1]", "most_common_deletion_length; ret_deletion_count[0] += 1 # variant_line = 'deletion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_deletion_count,", "ref_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position, ref_field, alt_field,", "most_common_base_count = 0; # Allow for the case where there", "bases, ref-reverse, alt-forward and alt-reverse bases\">\\n' ) fp_vcf.write( '##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that", "to be either .sam or .bam! Input file path: \"%s\".\\n'", "deletion_count += 1 deletion = bases[j : (j + num_bases)].upper()", "equally good choices. 
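
# --- Illustrative sketch (added for clarity; not part of the original script) ---------
# A small worked example of how one mpileup base column decomposes under the parsing
# rules in process_mpileup_line() above. The column string and the helper name
# (_example_pileup_column) are invented for illustration only; the function is never
# called by the script.
def _example_pileup_column():
    # Assume ref_base = 'A' and the 5th mpileup field is '..,+2AG,$^].*':
    #   '.', '.', ','  -> three reads matching the reference base (counted as 'A')
    #   '+2AG'         -> one insertion event of 'AG' attached to the preceding read
    #   ','            -> a fourth read matching the reference
    #   '$'            -> that read ends at this position
    #   '^]'           -> a new read starts here (']' encodes its mapping quality)
    #   '.'            -> the new read also matches the reference (fifth match)
    #   '*'            -> one read spans this position with a deletion
    # The parsing loop above would therefore produce:
    base_counts = {"A": 5}
    insertion_event_counts = {"AG": 1}
    end_counts = 1
    current_base_deletion_count = 1
    return base_counts, insertion_event_counts, end_counts, current_base_deletion_count
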
def process_mpileup(
    alignments_path, reference_path, mpileup_path, coverage_threshold, output_prefix,
    thread_id=0, bed_position="",
):
    fp = None
    try:
        fp = open(mpileup_path, "r")
    except IOError:
        sys.stderr.write('ERROR: Could not open file "%s" for reading!\n' % mpileup_path)
        return None

    ret_variant_list = []
    ret_vcf_list = []
    ret_snp_count = [0]
    ret_insertion_count = [0]
    ret_deletion_count = [0]
    ret_num_undercovered_bases = [0]
    ret_num_called_bases = [0]
    ret_num_correct_bases = [0]
    ret_coverage_sum = [0]

    fp_variant = None
    fp_vcf = None
    if output_prefix != "":
        if not os.path.exists(os.path.dirname(output_prefix)):
            os.makedirs(os.path.dirname(output_prefix))
        variant_file = "%s-cov_%d.variant.csv" % (output_prefix, coverage_threshold)
        fp_variant = open(variant_file, "w")
        vcf_file = "%s-cov_%d.variant.vcf" % (output_prefix, coverage_threshold)
        fp_vcf = open(vcf_file, "w")
        fp_vcf.write("##fileformat=VCFv4.0\n")
        fp_vcf.write("##fileDate=20150409\n")
        fp_vcf.write("##source=%s\n" % (" ".join(sys.argv)))
        fp_vcf.write("##reference=%s\n" % reference_path)
        fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw Depth">\n')
        fp_vcf.write('##INFO=<ID=TYPE,Number=A,Type=String,Description="Type of each allele (snp, ins, del, ...)">\n')
        fp_vcf.write('##INFO=<ID=AF,Number=1,Type=Float,Description="Allele Frequency">\n')
        fp_vcf.write('##INFO=<ID=SB,Number=1,Type=Integer,Description="Phred-scaled strand bias at this position">\n')
        fp_vcf.write('##INFO=<ID=DP4,Number=4,Type=Integer,Description="Counts for ref-forward bases, ref-reverse, alt-forward and alt-reverse bases">\n')
        fp_vcf.write('##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Indicates that the variant is an INDEL.">\n')
        fp_vcf.write('##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description="Indicates that the variant is a consensus variant (as opposed to a low frequency variant).">\n')
        fp_vcf.write('##INFO=<ID=HRUN,Number=1,Type=Integer,Description="Homopolymer length to the right of report indel position">\n')
        fp_vcf.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
        fp_vcf.flush()

    use_bed = False
    bed_chromosome = ""
    bed_pos_start = 0
    bed_pos_end = -1
    if bed_position != "":
        bed_split = bed_position.split(":")
        if len(bed_split) != 2:
            use_bed = False
        else:
            bed_chromosome = bed_split[0]
            bed_pos_split = bed_split[1].split("-")
            if len(bed_pos_split) != 2:
                use_bed = False
            else:
                bed_pos_start = int(bed_pos_split[0])
                bed_pos_end = int(bed_pos_split[1])
                use_bed = True
        sys.stderr.write("Using location specified through commandline:\n")
        sys.stderr.write('\tChromosome: "%s"\n' % bed_chromosome)
        sys.stderr.write("\tStart: %d\n" % bed_pos_start)
        sys.stderr.write("\tEnd: %d\n\n" % bed_pos_end)

    i = 0 if (use_bed == False) else max((bed_pos_start - 10), 0)
    j = 0
    num_bases_to_skip = 0
    for line in fp:
        if num_bases_to_skip > 0:
            num_bases_to_skip -= 1
            continue
        if use_bed == True:
            line_split = line.strip().split("\t")
            if len(line_split) > 2 and line_split[0] == bed_chromosome:
                current_pos = int(line_split[1])
                if current_pos < bed_pos_start or current_pos >= bed_pos_end:
                    i += 1
                    j += 1
                    continue
            else:
                i += 1
                j += 1
                continue
        if thread_id == 0:
            if (j % 1000) == 0:
                sys.stderr.write(
                    "\r[%d] snps = %d, insertions = %d, deletions = %d, undercovered = %d, coverage = %.2f"
                    % (i, ret_snp_count[0], ret_insertion_count[0], ret_deletion_count[0],
                       ret_num_undercovered_bases[0], (float(ret_coverage_sum[0]) / float((i + 1)))))
                sys.stderr.flush()
        variant_list_length = len(ret_variant_list)
        vcf_list_length = len(ret_vcf_list)
        num_bases_to_skip = process_mpileup_line(
            line, i, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count,
            ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases,
            ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False)
        if len(ret_variant_list) > variant_list_length and fp_variant != None:
            fp_variant.write("\n".join(ret_variant_list[variant_list_length:]) + "\n")
            fp_variant.flush()
        if len(ret_vcf_list) > vcf_list_length and fp_vcf != None:
            fp_vcf.write("\n".join(ret_vcf_list[vcf_list_length:]) + "\n")
            fp_vcf.flush()
        i += num_bases_to_skip
        i += 1
        j += 1
        # if (i > 10000):
        #     break
    fp.close()
    sys.stderr.write("\n")
    if fp_variant != None:
        fp_variant.close()
    if fp_vcf != None:
        fp_vcf.close()

    summary_lines = ""
    summary_lines += "alignments_file: %s\n" % alignments_path
    summary_lines += "mpileup_file: %s\n" % mpileup_path
    summary_lines += "coverage_threshold: %d\n" % coverage_threshold
    summary_lines += "snp_count: %d\n" % ret_snp_count[0]
    summary_lines += "insertion_count: %d\n" % ret_insertion_count[0]
    summary_lines += "deletion_count: %d\n" % ret_deletion_count[0]
    summary_lines += "num_undercovered_bases: %d\n" % ret_num_undercovered_bases[0]
    summary_lines += "num_called_bases: %d\n" % ret_num_called_bases[0]
    summary_lines += "num_correct_bases: %d\n" % ret_num_correct_bases[0]
    summary_lines += "average_coverage: %.2f\n" % (float(ret_coverage_sum[0]) / float((i + 1)))
    sys.stderr.write(summary_lines + "\n")
    sys.stderr.write("\n")

    if output_prefix != "":
        # summary_file = output_prefix + '.conssum'
        summary_file = output_prefix + ".sum"
        try:
            fp_sum = open(summary_file, "w")
            fp_sum.write(summary_lines)
            fp_sum.close()
            return summary_file
        except IOError:
            sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % (summary_file))
            return None
    return None
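
# --- Illustrative sketch (added for clarity; not part of the original script) ---------
# An example of the VCF data line this script emits, built from the same format strings
# used in process_mpileup_line() above. The chromosome name, position, bases and depth
# are invented for illustration only, and this helper is never called by the script.
def _example_vcf_record():
    # A called insertion of 'AG' after position 1042 of 'chr1' at raw depth 35.
    ref_name, position, ref_base = "chr1", "1042", "T"
    vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
        ref_name, position, ref_base, ref_base + "AG", 1000, "DP=%s;TYPE=ins" % "35")
    # The matching human-readable record (prefixed with 'ins\t...') goes to the
    # -cov_<N>.variant.csv file, while the line below goes to -cov_<N>.variant.vcf.
    return vcf_line  # 'chr1\t1042\t.\tT\tTAG\t1000\tPASS\tDP=35;TYPE=ins'
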
split_line", "0: sys.stderr.write( \"\\r[%d] snps = %d, insertions = %d, deletions", "= sorted_base_counts[-1][1] except Exception as e: pass # sys.stderr.write(str(e) +", "variant_line = ( \"del\\tpos = %s\\tref = %s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr", "base == r\"-\": # This marks the occurance of deletions.", "( ref_name, position, ref_field, alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ##################", "True if ( sum( [ int(deletion_count[1] == most_common_deletion_count) for deletion_count", "prefix_for_intermediate_results, collective_output_file ): fp_collect = None try: fp_collect = open(collective_output_file,", "a '*' at the current position (because we don't know", "read start and a char marking the # mapping quality", "fp_vcf.flush() use_bed = False bed_chromosome = \"\" bed_pos_start = 0", "% ( i, ret_snp_count[0], ret_insertion_count[0], ret_deletion_count[0], ret_num_undercovered_bases[0], (float(ret_coverage_sum[0]) / float((i", "length to the right of report indel position\">\\n' ) fp_vcf.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n\")", "ret_num_called_bases[0] += most_common_insertion_length # variant_line = 'insertion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_insertion_count, str(sorted_base_counts),", "= 0; # Allow for the case where there are", "# through the '*' character. # Get the number of", "os.path.getmtime(alignments_path) > os.path.getmtime(alignments_path_bam) ): # Convert the SAM file to", "ref_name = split_line[0] position = split_line[1] ref_base = split_line[2] coverage", "# print 'position: %s' % position; # print 'bases: \"%s\"'", "we need to make an allele aware count. # Get", "fp_variant = None fp_vcf = None if output_prefix != \"\":", "sam_files, prefix_for_intermediate_results, collective_output_file ): fp_collect = None try: fp_collect =", "we don't know where it ends). non_indel_coverage_next_base = ( int(coverage)", "= i + 1 while bases[j] in \"0123456789\": j +=", "### VCF output ### qual = 1000 info = \"DP=%s;TYPE=snp\"", "where a deletion occurs, with the first base being non-deletion,", "1 # variant_line = 'deletion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts),", "+= 1 # ret_variant_list.append(line_number); variant_line = ( \"SNP\\tpos = %s\\tref", "by one # through the '*' character. 
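# --- Illustrative sketch (not part of the original script) ---
# A samtools mpileup record is tab-separated: reference name, 1-based
# position, reference base, depth, read bases, and (optionally) base
# qualities. A stand-alone parser for one record could look like this
# (helper name is hypothetical):
def split_mpileup_record(line):
    fields = line.rstrip("\n").split("\t")
    if len(fields) < 5:
        raise ValueError("unexpected mpileup record: %r" % line)
    ref_name, position, ref_base, coverage, bases = fields[:5]
    qualities = fields[5] if len(fields) >= 6 else ""
    return ref_name, int(position), ref_base.upper(), int(coverage), bases, qualities
# Example: split_mpileup_record("chr1\t100\tA\t3\t..,\tII!")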
# Get the", "-= 1 continue if use_bed == True: line_split = line.strip().split(\"\\t\")", "# Allow for the case where there are multiple equally", "the length of the numeric entry plus the actual number", "%s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr = %d\\tmost_common_insertion_count = %d\\tref_base = %s\\tcons_base =", "ref_name, int(coverage), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), ) ) ret_variant_list.append(variant_line) ### VCF", "False ) else: most_common_deletion_count = 0 most_common_deletion_length = 0 deletion_unique", "# print '\\nTEST\\tpos = %s\\tcoverage = %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count =", "\"/\" + os.path.splitext(os.path.basename(alignments_path))[0] + \".bam\" ) alignments_path_bam_exists = os.path.exists(alignments_path_bam) #", "bed_chromosome: current_pos = int(line_split[1]) if current_pos < bed_pos_start or current_pos", "( True if ( sum( [ int(deletion_count[1] == most_common_deletion_count) for", "= 'insertion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line);", "the file, and the correctness of its extension. # Also,", "sys.stderr.write(summary_lines + \"\\n\") sys.stderr.write(\"\\n\") if output_prefix != \"\": # summary_file", "# sys.stderr.write('\\t(If <collective_output_file> is equal to \"-\", no files will", "number of the inserted bases # and the actual bases", "dir_name + \"/\" + os.path.splitext(os.path.basename(alignments_path))[0] + \".bam\" ) alignments_path_bam_exists =", "os.path.splitext(os.path.basename(alignments_path))[0] + \".bam\" ) alignments_path_bam_exists = os.path.exists(alignments_path_bam) # Check if", "): # Sanity checking the existence of the file, and", "it doesn't exist yet. mpileup_path = \"%s.mpileup\" % alignments_path_bam mpileup_exists", "current_base_deletion_count = 0 deletion_count = 0 insertion_event_counts = {} deletion_event_counts", "a read. It encodes two # symbols: '^' marking the", "numeric entry plus the actual number of bases # that", "in sam_files: summary_file = prefix_for_intermediate_results + \".sum\" try: fp_sum =", "os.path.splitext(alignments_path_bam)[0], ) sys.stderr.write(command + \"\\n\") subprocess.call(command, shell=\"True\") # Create the", "case, we prefer the choice which is equal to the", "alignments_path_bam, ) subprocess.call(command, shell=\"True\") elif alignments_path.endswith(\"bam\") == False: sys.stderr.write( 'ERROR:", "+= 1 j += 1 continue if thread_id == 0:", "# and the actual bases that are inserted (these bases", "winner. if deletion_unique == True: # ret_deletion_count[0] += most_common_deletion_length; ret_deletion_count[0]", "I count total coverage of this base, or the non_indel_coverage_current_base?", "### VCF output ### alt_base = ( (\"{}\") if (len(sorted_base_counts)", "bed_pos_start = int(bed_pos_split[0]) bed_pos_end = int(bed_pos_split[1]) use_bed = True sys.stderr.write(\"Using", "www.sovic.org # # Creates a pileup from a given SAM/BAM", "get skipped because of a deletion # consensus), then the", "to the reference. is_good = False for base_count in sorted_base_counts:", "alignments_path_bam_exists == True and os.path.getmtime(alignments_path) > os.path.getmtime(alignments_path_bam) ): # Convert", "not open file \"%s\" for reading!\\n' % mpileup_path ) return", "# This marks the end of a read. 
end_counts +=", "None fp_vcf = None if output_prefix != \"\": if not", "# sys.stderr.write(str(base_counts) + '\\n'); # sys.stderr.write('original_bases:\\n'); # sys.stderr.write(str(original_bases) + '\\n');", "is already decided if there is going # to be", "Handling base consensus. sorted_base_counts = sorted( list(base_counts.items()), key=operator.itemgetter(1) ) try:", "the current base with the coverage threshold. # However, the", "= %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s\\n' % (position, int(coverage), non_indel_coverage_current_base,", "[position]\\n\" % sys.argv[0] ) sys.stderr.write( '\\t(If <collective_output_file> is equal to", "int(position) == 3000000 or int(position) == 4000000): # print '\\nTEST\\tpos", "summary_lines += \"snp_count: %d\\n\" % ret_snp_count[0] summary_lines += \"insertion_count: %d\\n\"", "summary_file = \"%s-cov_%d.variant.sum\" % (output_prefix, coverage_threshold) try: fp_sum = open(summary_file,", "ref_field, alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ################## else: sys.stderr.write( \"\\nWarning:", "\"%s\" does not exist!\\n' % alignments_path) return if alignments_path.endswith(\"sam\"): #", "'##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description=\"Indicates that the variant is a consensus variant (as opposed", "File extension needs to be either .sam or .bam! Input", "a SNP was detected, but there were no bases in", "This marks the occurance of an insertion. It is a", "that are inserted (these bases follow the current position). #", "summary_lines += \"mpileup_file: %s\\n\" % mpileup_path summary_lines += \"coverage_threshold: %d\\n\"", "= {} insertion_count = 0 current_base_deletion_count = 0 deletion_count =", "I chose to handle them as undercovered bases. non_indel_coverage_current_base =", "sys.stderr.write('\\tChromosome: \"%s\"\\n' % bed_chromosome) sys.stderr.write(\"\\tStart: %d\\n\" % bed_pos_start) sys.stderr.write(\"\\tEnd: %d\\n\\n\"", "read. end_counts += 1 elif base == r\"*\": # This", "% 1000) == 0: sys.stderr.write( \"\\r[%d] snps = %d, insertions", "1 deletion_count += 1 deletion = bases[j : (j +", "= %s\\tcons_base = %s\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s\"", "num_bases_to_skip = process_mpileup_line( line, i, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count,", "(ref_base)) sys.stdout.write(\"Consensus base: %s\\n\\n\" % (base_count[0])) # if (int(position) ==", "line.strip()); # ret_variant_list.append(variant_line); variant_line = ( \"undercovered1\\tpos = %s\\tref =", "- current_base_deletion_count if verbose == True: sys.stdout.write(\"%s\\nbase_counts: %s\\n\" % (line.strip(),", "coverage also accounts for the deletions denoted with the '*'", "through commandline:\\n\") sys.stderr.write('\\tChromosome: \"%s\"\\n' % bed_chromosome) sys.stderr.write(\"\\tStart: %d\\n\" % bed_pos_start)", "clear winner. if deletion_unique == True: # ret_deletion_count[0] += most_common_deletion_length;", "a char marking the # mapping quality of the read.", "of an insertion. 
It is a composite object # consisting", "+= \"snp_count: %d\\n\" % ret_snp_count[0] summary_lines += \"insertion_count: %d\\n\" %", "sys.argv[3] sam_file = sys.argv[4] bed_position = \"\" if len(sys.argv) >", "+= num_bases_to_skip i += 1 j += 1 # if", "%s\\tcoverage = %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count = %d\\tref_base = %s\\tcons_base =", "1 j += 1 continue else: # print line_split[0]; #", "print '\\nTEST\\tpos = %s\\tcoverage = %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count = %d\\tref_base", "int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); #", "be either .sam or .bam! Input file path: \"%s\".\\n' %", "# if (i > 10000): # break; fp.close() sys.stderr.write(\"\\n\") if", "> 0: sorted_deletion_counts = sorted( list(deletion_event_counts.items()), key=operator.itemgetter(1) ) most_common_deletion_count =", "): # Convert the SAM file to a sorted BAM", "sorted_insertion_counts ] ) == 1 ) else False ) else:", "the inserted bases # and the actual bases that are", ") else False ) else: most_common_insertion_count = 0 most_common_insertion_length =", "# lines = fp.readlines(); fp_variant = None fp_vcf = None", "( alignments_path_bam, alignments_path_bam, ) subprocess.call(command, shell=\"True\") elif alignments_path.endswith(\"bam\") == False:", "coverage_threshold <output_prefix> <{sb}am_file_> [position]\\n\" % sys.argv[0] ) sys.stderr.write( '\\t(If <collective_output_file>", "ret_vcf_list.append(vcf_line) ################## else: ret_num_called_bases[0] += 1 ret_coverage_sum[0] += int(coverage) #", "= sys.argv[1] coverage_threshold = int(sys.argv[2]) output_prefix = sys.argv[3] sam_file =", "bed_pos_end): # len(lines)): num_bases_to_skip = 0 for line in fp:", "base consensus. sorted_base_counts = sorted( list(base_counts.items()), key=operator.itemgetter(1) ) try: most_common_base_count", "info, ) ret_vcf_list.append(vcf_line) ################## else: sys.stderr.write( \"\\nWarning: a SNP was", "SAM/BAM file, and calls consensus bases (or variants). import os", "it ends). non_indel_coverage_next_base = ( int(coverage) - end_counts - deletion_count", "sorted_deletion_counts[-1][0]) alt_field = ref_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name,", "0 while i < len(original_bases): if original_bases[i] == \".\" or", "float((i + 1))) ) sys.stderr.write(summary_lines + \"\\n\") sys.stderr.write(\"\\n\") if output_prefix", "( (\"{}\") if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])) ) qual", "(coverage) ref_field = ref_base alt_field = alt_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\"", "int(position) == 1000000 or int(position) == 2000000 or int(position) ==", "summary_lines += \"num_undercovered_bases: %d\\n\" % ret_num_undercovered_bases[0] summary_lines += \"num_called_bases: %d\\n\"", "info, ) ret_vcf_list.append(vcf_line) ################## else: ret_num_called_bases[0] += 1 ret_coverage_sum[0] +=", "== True and os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path) ): command = \"samtools", "ambiguous. 
pass return 0 def process_mpileup( alignments_path, reference_path, mpileup_path, coverage_threshold,", "0 -A -f %s %s > %s.mpileup\" % ( reference_path,", "% summary_file ) continue fp_collect.write(\"\".join(lines) + \"\\n\") fp_collect.close() if __name__", "deletion occurs, with the first base being non-deletion, and the", "variant (as opposed to a low frequency variant).\">\\n' ) fp_vcf.write(", "sys.stderr.write(\"\\tStart: %d\\n\" % bed_pos_start) sys.stderr.write(\"\\tEnd: %d\\n\\n\" % bed_pos_end) # i", "\"%s\" for reading!\\n' % mpileup_path ) return None ret_variant_list =", "len(sorted_base_counts) > 0: ret_snp_count[0] += 1 # ret_variant_list.append(line_number); variant_line =", "character '-', the number of the deleted bases # and", "does not exist!\\n' % alignments_path) return if alignments_path.endswith(\"sam\"): # Determine", "of report indel position\">\\n' ) fp_vcf.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n\") fp_vcf.flush() use_bed = False", "+= 1 base_counts = {} insertion_count = 0 current_base_deletion_count =", "most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); variant_line", "0: sorted_insertion_counts = sorted( list(insertion_event_counts.items()), key=operator.itemgetter(1) ) most_common_insertion_count = sorted_insertion_counts[-1][1]", "a consensus variant (as opposed to a low frequency variant).\">\\n'", "!= \"\": # summary_file = output_prefix + '.conssum'; summary_file =", "%s\\n\" % (line.strip(), str(base_counts))) # EDIT: Previously I compared the", "else: ret_num_correct_bases[0] += 1 if verbose == True: sys.stdout.write(\"Reference base:", "\"samtools index %s %s.bai\" % ( alignments_path_bam, alignments_path_bam, ) subprocess.call(command,", "i += skip_bases elif base == r\"+\": # This marks", "mpileup_exists == False or ( mpileup_exists == True and os.path.getmtime(alignments_path)", "pileup bases that do not have any actual bases, but", "\"\\nWarning: a SNP was detected, but there were no bases", "In this case, insertions are a clear winner. if insertion_unique", "+= \"deletion_count: %d\\n\" % ret_deletion_count[0] summary_lines += \"num_undercovered_bases: %d\\n\" %", "<{sb}am_file_> [position]\\n\" % sys.argv[0] ) sys.stderr.write( '\\t(If <collective_output_file> is equal", "== False or ( mpileup_exists == True and os.path.getmtime(alignments_path) >", "sys.stderr.write(\"Using location specified through commandline:\\n\") sys.stderr.write('\\tChromosome: \"%s\"\\n' % bed_chromosome) sys.stderr.write(\"\\tStart:", "j = i + 1 while bases[j] in \"0123456789\": j", "if len(line_split) > 2 and line_split[0] == bed_chromosome: current_pos =", "ignore this case, because we count deletions one by one", "%d\\n\" % ret_insertion_count[0] summary_lines += \"deletion_count: %d\\n\" % ret_deletion_count[0] summary_lines", "# Sanity check, just to see if there actually were", "# ret_variant_list.append(variant_line); try: temp_sorted_bc = sorted_base_counts[-1][0] except: temp_sorted_bc = 0", "str(deletion_event_counts), line.strip(), ) ) sys.stderr.write(\"\\n\") else: ret_num_correct_bases[0] += 1 if", "It encodes two # symbols: '^' marking the read start", "need to be skipped in the string. 
j = i", "alt_field = \"N\" vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position,", "\"alignments_file: %s\\n\" % alignments_path summary_lines += \"mpileup_file: %s\\n\" % mpileup_path", ") > coverage_threshold: # Sanity check, just to see if", "SAM file to a sorted BAM file. command = \"samtools", "0 most_common_deletion_length = 0 deletion_unique = False if ( most_common_insertion_count", "= [] ret_vcf_list = [] ret_snp_count = [0] ret_insertion_count =", "position). # In our approach, we ignore this case, because", "Skip the length of the numeric entry plus the actual", "+ os.path.splitext(os.path.basename(alignments_path))[0] + \".bam\" ) alignments_path_bam_exists = os.path.exists(alignments_path_bam) # Check", "insertion_unique == True: # ret_insertion_count[0] += most_common_insertion_length; ret_insertion_count[0] += 1", "%d\\tmost_common_deletion_count = %d\\tref_base = %s\\tcons_base = %s\\tbase_counts = %s\\tinsertion_counts =", "fp_vcf != None: fp_vcf.write(\"\\n\".join(ret_vcf_list[vcf_list_length:]) + \"\\n\") fp_vcf.flush() i += num_bases_to_skip", "variant is an INDEL.\">\\n' ) fp_vcf.write( '##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description=\"Indicates that the variant", "== r\"+\": # This marks the occurance of an insertion.", "detected, but there were no bases in the sorted_base_counts!\" )", "is the starting position of a read. It encodes two", "== most_common_deletion_count) for deletion_count in sorted_deletion_counts ] ) == 1", "ins, del, mnp, complex)\">\\n' ) fp_vcf.write( '##INFO=<ID=AF,Number=1,Type=Float,Description=\"Allele Frequency\">\\n' ) fp_vcf.write(", ") ) ret_variant_list.append(variant_line) ### Insertions in the VCF format specifies", "= \"N\" vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position, ref_field,", "insertion_count = 0 current_base_deletion_count = 0 deletion_count = 0 insertion_event_counts", "process_mpileup_line( line, i, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases,", "fp_vcf.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n\") fp_vcf.flush() use_bed = False bed_chromosome = \"\" bed_pos_start =", "True: sys.stdout.write(\"Reference base: %s\\n\" % (ref_base)) sys.stdout.write(\"Consensus base: %s\\n\\n\" %", "# and we need to make an allele aware count.", "Determine the path where the new BAM file will be", "the deletion marking, but here we actually care about the", "< bed_pos_end): # len(lines)): num_bases_to_skip = 0 for line in", "str(deletion_event_counts), line.strip()); ### Handling indel consensus. ### Put a different", "= [0] # lines = fp.readlines(); fp_variant = None fp_vcf", "+ num_bases - 1 insertion_count += 1 insertion = bases[j", "ret_snp_count[0], ret_insertion_count[0], ret_deletion_count[0], ret_num_undercovered_bases[0], (float(ret_coverage_sum[0]) / float((i + 1))), )", "ret_insertion_count[0] += most_common_insertion_length; ret_insertion_count[0] += 1 ret_num_called_bases[0] += most_common_insertion_length #", "ret_deletion_count = [0] ret_num_undercovered_bases = [0] ret_num_called_bases = [0] ret_num_correct_bases", "extension. 
# Also, if input file is a SAM file,", "# In this case, we prefer the choice which is", "character '+', the number of the inserted bases # and", "ret_coverage_sum, coverage_threshold, verbose=False, ): # Split the line, and perform", ") fp_vcf.write( '##INFO=<ID=DP4,Number=4,Type=Integer,Description=\"Counts for ref-forward bases, ref-reverse, alt-forward and alt-reverse", "+ 1) : j]) skip_bases = (j - i) +", "position where a insertion occurs. The ref position should contain", "'\\n'); # most_common_base_count = 0; # Allow for the case", "T 20 ******************** 8,2*#-;)$B>2$1&D- # I chose to handle them", "different coverage threshold. Here we are interested even in the", "fp_vcf.write(\"##reference=%s\\n\" % reference_path) fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw Depth\">\\n') fp_vcf.write( '##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of each allele", "2015. www.sovic.org # # Creates a pileup from a given", "position, ref_name, int(coverage), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), ) ) ret_variant_list.append(variant_line) ###", "= open(vcf_file, \"w\") fp_vcf.write(\"##fileformat=VCFv4.0\\n\") fp_vcf.write(\"##fileDate=20150409\\n\") fp_vcf.write(\"##source=%s\\n\" % (\" \".join(sys.argv))) fp_vcf.write(\"##reference=%s\\n\"", "+= \"coverage_threshold: %d\\n\" % coverage_threshold summary_lines += \"snp_count: %d\\n\" %", "j += 1 # if (i > 10000): # break;", "summary_lines += \"alignments_file: %s\\n\" % alignments_path summary_lines += \"mpileup_file: %s\\n\"", "variant is a consensus variant (as opposed to a low", "[] ret_snp_count = [0] ret_insertion_count = [0] ret_deletion_count = [0]", "we prefer the choice which is equal to the reference.", "split_line[4] if len(split_line) == 6: qualities = split_line[5] bases =", "summary_lines += \"coverage_threshold: %d\\n\" % coverage_threshold summary_lines += \"snp_count: %d\\n\"", "\"\": bed_split = bed_position.split(\":\") if len(bed_split) != 2: use_bed =", "+= skip_bases elif base == r\"+\": # This marks the", "alt-forward and alt-reverse bases\">\\n' ) fp_vcf.write( '##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the variant", "actual reference base. i = 0 while i < len(original_bases):", "== True: line_split = line.strip().split(\"\\t\") if len(line_split) > 2 and", "correctness of its extension. # Also, if input file is", "alt_field = \"%s%s\" % (ref_base, sorted_insertion_counts[-1][0]) vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" %", "non_indel_coverage_current_base, most_common_base_count, ref_base, (\"{}\") if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])),", "1) : j]) skip_bases = (j - i) + num_bases", "(float(ret_coverage_sum[0]) / float((i + 1))), ) ) sys.stderr.flush() variant_list_length =", "TODO: An additional problematic case, discovered this on 03.11.2014., when", "list(deletion_event_counts.items()), key=operator.itemgetter(1) ) most_common_deletion_count = sorted_deletion_counts[-1][1] most_common_deletion_length = len(sorted_deletion_counts[-1][0]) deletion_unique", "\".\" or original_bases[i] == \",\": bases += ref_base else: bases", "main( alignments_path, reference_path, coverage_threshold, output_prefix, thread_id=0, bed_position=\"\", ): # Sanity", "string. 
j = i + 1 while bases[j] in \"0123456789\":", ") exit(1) reference_file = sys.argv[1] coverage_threshold = int(sys.argv[2]) output_prefix =", "= %d\\tmost_common_deletion_count = %d\\tref_base = %s\\tcons_base = %s\\tbase_counts = %s\\tinsertion_counts", "= 0 while i < len(bases): base = bases[i] if", "file path: \"%s\".\\n' % alignments_path ) return # Convert the", "reference_path, coverage_threshold, output_prefix, thread_id=0, bed_position=\"\", ): # Sanity checking the", "command = \"samtools index %s %s.bai\" % ( alignments_path_bam, alignments_path_bam,", "%s\\tdeletion_counts = %s\\t%s\" % ( position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count,", "- i) + num_bases - 1 insertion_count += 1 insertion", "sys.stderr.write('Processing file \"%s\"...\\n' % alignments_path) sys.stderr.write('Reference file \"%s\"...\\n' % reference_path)", "base: %s\\n\\n\" % (base_count[0])) # if (int(position) == 100000 or", "count deletions one by one # through the '*' character.", "fp_vcf.write( '##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the variant is an INDEL.\">\\n' ) fp_vcf.write(", "following bases being a deletion event. ### VCF output ###", "of the read. # increase_in_dict(base_counts, bases[i + 1].upper()); i +=", "% alignments_path ) return # Convert the sorted BAM file", "= \".\" alignments_path_bam = ( dir_name + \"/\" + os.path.splitext(os.path.basename(alignments_path))[0]", "the '.' and ',' signs with the actual reference base.", "= %s\\tref = %s\\tcoverage = %d\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts", "(len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), )", ") ret_vcf_list.append(vcf_line) ################## return most_common_deletion_length else: # In this case,", "symbols. How should this be handled properly? # Example line", "indel position\">\\n' ) fp_vcf.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n\") fp_vcf.flush() use_bed = False bed_chromosome =", "there are deletions, get the most common one. if len(list(deletion_event_counts.keys()))", "%s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr = %d\\tmost_common_deletion_count = %d\\tref_base = %s\\tcons_base =", "a deletion event. ### VCF output ### alt_base = (", "ref_base alt_field = alt_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name,", "ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=use_bed, ) if", "= 0 most_common_deletion_length = 0 deletion_unique = False if (", "reference_path, (\"%s.mpileup\" % alignments_path_bam), coverage_threshold, output_prefix, thread_id, bed_position, ) def", "the read. # increase_in_dict(base_counts, bases[i + 1].upper()); i += 1", "the new BAM file will be generated. dir_name = os.path.dirname(alignments_path)", "fp.close() sys.stderr.write(\"\\n\") if fp_variant != None: fp_variant.close() if fp_vcf !=", "qual = 1000 info = \"DP=%s;TYPE=snp\" % (coverage) ref_field =", "this case, deletions are a clear winner. if deletion_unique ==", "count total coverage of this base, or the non_indel_coverage_current_base? 
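# --- Illustrative sketch (not part of the original script) ---
# '+' and '-' events in the pileup bases string are followed by a decimal
# length and then that many inserted/deleted bases (e.g. "+2AT", "-1C").
# The scan above reads the digits to find the length, then skips the digits
# plus the event sequence. A compact, stand-alone equivalent:
def read_indel_event(bases, i):
    # bases[i] is '+' or '-'; returns (event sequence, index just past it).
    j = i + 1
    while j < len(bases) and bases[j].isdigit():
        j += 1
    num_bases = int(bases[i + 1:j])
    seq = bases[j:j + num_bases].upper()
    return seq, j + num_bases
# Example: read_indel_event("+2ATG", 0) -> ("AT", 4)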
most_common_base_count", "len(line_split) > 2 and line_split[0] == bed_chromosome: current_pos = int(line_split[1])", "...]\\n' % sys.argv[0]); # sys.stderr.write('\\t(If <collective_output_file> is equal to \"-\",", "ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False, ): # Split the line, and", "BAM file to a mpileup file if it doesn't exist", "specifying \"chromosome:start-end\"\\n\\n' ) exit(1) reference_file = sys.argv[1] coverage_threshold = int(sys.argv[2])", "sys.stderr.write('sorted_base_counts:\\n'); # sys.stderr.write(str(sorted_base_counts) + '\\n'); # sys.stderr.write('base_counts:\\n'); # sys.stderr.write(str(base_counts) +", "base being non-deletion, and the following bases being a deletion", "# Creates a pileup from a given SAM/BAM file, and", "elif base == r\"+\": # This marks the occurance of", "equal to \"-\", no files will be written to disk.)\\n');", "1, because we have i += 1 down there. elif", "and fp_variant != None: fp_variant.write(\"\\n\".join(ret_variant_list[variant_list_length:]) + \"\\n\") fp_variant.flush() if len(ret_vcf_list)", "the # insertion/deletion count is ambiguous. pass return 0 def", "deletions denoted with the '*' sign, which I think #", "The ref position should contain the base which is the", "generated. dir_name = os.path.dirname(alignments_path) if dir_name == \"\": dir_name =", "qual = 1000 info = \"DP=%s;TYPE=ins\" % (coverage) ref_field =", "ref_field = \"%s%s\" % (ref_base, sorted_deletion_counts[-1][0]) alt_field = ref_base vcf_line", "bases that need to be skipped in the string. j", "i, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum,", "is going # to be a deletion event. If we", "%d\\tnon_indel_cov_curr = %d\\tmost_common_insertion_count = %d\\tref_base = %s\\tcons_base = %s\\tbase_counts =", "(as opposed to a low frequency variant).\">\\n' ) fp_vcf.write( '##INFO=<ID=HRUN,Number=1,Type=Integer,Description=\"Homopolymer", "if fp_variant != None: fp_variant.close() if fp_vcf != None: fp_vcf.close()", "alignments_path) sys.stderr.write('Reference file \"%s\"...\\n' % reference_path) sys.stderr.write(\"Coverage threshold: %d\\n\" %", "qual, info, ) ret_vcf_list.append(vcf_line) ################## elif ( most_common_deletion_count > most_common_insertion_count", "( int(coverage) - end_counts - deletion_count - insertion_count ) if", "1 if verbose == True: sys.stdout.write(\"Reference base: %s\\n\" % (ref_base))", "len(ret_vcf_list) num_bases_to_skip = process_mpileup_line( line, i, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count,", "ret_num_correct_bases[0] summary_lines += \"average_coverage: %.2f\\n\" % ( (float(ret_coverage_sum[0]) / float((i", "%d\\n\" % ret_num_undercovered_bases[0] summary_lines += \"num_called_bases: %d\\n\" % ret_num_called_bases[0] summary_lines", "event. If we wound up at this base (i.e. this", "most_common_base_count = 0 pass # variant_line = 'undercovered1\\tpos = %s\\tcoverage", "int(coverage) - end_counts - deletion_count - insertion_count ) if (", "this on 03.11.2014., when analyzing BWA-MEM's mpileup. 
# There are", ") else: most_common_deletion_count = 0 most_common_deletion_length = 0 deletion_unique =", "% alignments_path) return if alignments_path.endswith(\"sam\"): # Determine the path where", "written to disk.)\\n' ) sys.stderr.write( '\\tPosition parameter is a string", "equal to \"-\", no files will be written to disk.)\\n'", "mpileup_path summary_lines += \"coverage_threshold: %d\\n\" % coverage_threshold summary_lines += \"snp_count:", "summary_lines += \"insertion_count: %d\\n\" % ret_insertion_count[0] summary_lines += \"deletion_count: %d\\n\"", "%s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s\" % ( position, ref_name, int(coverage),", "if (int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count): # if", "(\"{}\") if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts),", "ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_deletion_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(),", "fp_sum.close() except IOError: sys.stderr.write( 'ERROR: Could not open file \"%s\"", "Allow for the case where there are multiple equally good", "else False ) else: most_common_insertion_count = 0 most_common_insertion_length = 0", "non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); ### Handling", "alignments_path_bam), coverage_threshold, output_prefix, thread_id, bed_position, ) def CollectSummaries( sam_files, prefix_for_intermediate_results,", "i = 0 if (use_bed == False) else max((bed_pos_start -", "skipped in the string. j = i + 1 while", "# if (int(position) == 100000 or int(position) == 1000000 or", "== True: # ret_deletion_count[0] += most_common_deletion_length; ret_deletion_count[0] += 1 #", "split_line[5] bases = \"\" # Replace the '.' and ','", "0 # while (i < bed_pos_end): # len(lines)): num_bases_to_skip =", "processes = [] if output_prefix == \"-\": output_prefix = os.path.splitext(sam_file)[0]", "= ( \"SNP\\tpos = %s\\tref = %s\\tcoverage = %d\\tnon_indel_cov_curr =", "= ( \"del\\tpos = %s\\tref = %s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr =", "total coverage of this base, or the non_indel_coverage_current_base? sorted_base_counts =", "a string specifying \"chromosome:start-end\"\\n\\n' ) exit(1) reference_file = sys.argv[1] coverage_threshold", "summary_file ) continue fp_collect.write(\"\".join(lines) + \"\\n\") fp_collect.close() if __name__ ==", "coverage_threshold or int(coverage) == current_base_deletion_count): # if (non_indel_coverage_current_base < coverage_threshold):", "Copyright <NAME>, 2015. www.sovic.org # # Creates a pileup from", "coverage_threshold, output_prefix, 0, bed_position) # if (output_prefix != '-'): #", "fp_vcf = None if output_prefix != \"\": if not os.path.exists(os.path.dirname(output_prefix)):", "of deletions. It is a composite object # consisting of:", "+ num_bases - 1 deletion_count += 1 deletion = bases[j", "== False or ( alignments_path_bam_exists == True and os.path.getmtime(alignments_path) >", "== False) else max((bed_pos_start - 10), 0) j = 0", "def increase_in_dict(dict_counter, value): try: dict_counter[value] += 1 except: dict_counter[value] =", "# This is the starting position of a read. 
It", "'ERROR: File extension needs to be either .sam or .bam!", "{} insertion_count = 0 current_base_deletion_count = 0 deletion_count = 0", "is equal to \"-\", no files will be written to", "[ int(insertion_count[1] == most_common_insertion_count) for insertion_count in sorted_insertion_counts ] )", "a clear winner. if insertion_unique == True: # ret_insertion_count[0] +=", "1 # Increase only by 1, because we have i", "variant_file = \"%s-cov_%d.variant.csv\" % (output_prefix, coverage_threshold) fp_variant = open(variant_file, \"w\")", "i += 1 down there. elif base == r\"$\": #", "% reference_path) sys.stderr.write(\"Coverage threshold: %d\\n\" % coverage_threshold) summary_file = process_mpileup(", "think # isn't relevant, as deletions are counted prior to", "True sys.stderr.write(\"Using location specified through commandline:\\n\") sys.stderr.write('\\tChromosome: \"%s\"\\n' % bed_chromosome)", "% sys.argv[0] ) sys.stderr.write( '\\t(If <collective_output_file> is equal to \"-\",", "most_common_insertion_length = len(sorted_insertion_counts[-1][0]) insertion_unique = ( True if ( sum(", "and we need to make an allele aware count. #", "Exception as e: pass # sys.stderr.write(str(e) + '\\n'); # sys.stderr.write('sorted_base_counts:\\n');", "ret_variant_list = [] ret_vcf_list = [] ret_snp_count = [0] ret_insertion_count", "2: use_bed = False else: bed_chromosome = bed_split[0] bed_pos_split =", "0) j = 0 # while (i < bed_pos_end): #", "<output_prefix> <{sb}am_file_> [position]\\n\" % sys.argv[0] ) sys.stderr.write( '\\t(If <collective_output_file> is", "a clear winner. if deletion_unique == True: # ret_deletion_count[0] +=", "output_prefix = sys.argv[3] sam_file = sys.argv[4] bed_position = \"\" if", "most_common_insertion_count = 0 most_common_insertion_length = 0 insertion_unique = False #", "+ num_bases)].upper() increase_in_dict(insertion_event_counts, insertion) i += skip_bases else: increase_in_dict(base_counts, bases[i].upper())", "# Convert the SAM file to a sorted BAM file.", "ret_insertion_count = [0] ret_deletion_count = [0] ret_num_undercovered_bases = [0] ret_num_called_bases", "non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line);", "deletion_event_counts = {} end_counts = 0 # print 'position: %s'", "fp_vcf.write( '##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description=\"Indicates that the variant is a consensus variant (as", "at that point is already decided if there is going", "i += 1 j += 1 continue if thread_id ==", "num_bases - 1 insertion_count += 1 insertion = bases[j :", "Could not open file \"%s\" for reading!\\n' % mpileup_path )", ") fp_vcf.write( '##INFO=<ID=SB,Number=1,Type=Integer,Description=\"Phred-scaled strand bias at this position\">\\n' ) fp_vcf.write(", "= 0 # while (i < bed_pos_end): # len(lines)): num_bases_to_skip", "IOError: sys.stderr.write( 'ERROR: Could not open file \"%s\" for writing!\\n'", "ref base + the insertion event. ### VCF output ###", "the given name already exists. if alignments_path_bam_exists == False or", "5: bed_position = sys.argv[5] # sys.stderr.write('bed_position: \"%s\"\\n\\n' % bed_position); processes", "# mapping quality of the read. 
# increase_in_dict(base_counts, bases[i +", "1 j += 1 # if (i > 10000): #", "%d\\n\" % ret_deletion_count[0] summary_lines += \"num_undercovered_bases: %d\\n\" % ret_num_undercovered_bases[0] summary_lines", "%s\" % ( alignments_path, os.path.splitext(alignments_path_bam)[0], ) sys.stderr.write(command + \"\\n\") subprocess.call(command,", "ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=use_bed, ) if len(ret_variant_list)", "len(sorted_deletion_counts[-1][0]) deletion_unique = ( True if ( sum( [ int(deletion_count[1]", "= 0 if (use_bed == False) else max((bed_pos_start - 10),", "% mpileup_path summary_lines += \"coverage_threshold: %d\\n\" % coverage_threshold summary_lines +=", "output_prefix, thread_id=0, bed_position=\"\", ): # Sanity checking the existence of", "BAM. alignments_path_bam = alignments_path if os.path.exists(alignments_path) == False: sys.stderr.write('ERROR: File", "alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ################## else: ret_num_called_bases[0] += 1", "str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); # return most_common_deletion_length; variant_line =", "In this case, either the base count consensus wins, or", "\"\": if not os.path.exists(os.path.dirname(output_prefix)): os.makedirs(os.path.dirname(output_prefix)) variant_file = \"%s-cov_%d.variant.csv\" % (output_prefix,", "coverage = %.2f\" % ( i, ret_snp_count[0], ret_insertion_count[0], ret_deletion_count[0], ret_num_undercovered_bases[0],", "\"0123456789\": j += 1 num_bases = int(bases[(i + 1) :", "ref_field = ref_base alt_field = \"N\" vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" %", "None: fp_variant.write(\"\\n\".join(ret_variant_list[variant_list_length:]) + \"\\n\") fp_variant.flush() if len(ret_vcf_list) > vcf_list_length and", "specified through commandline:\\n\") sys.stderr.write('\\tChromosome: \"%s\"\\n' % bed_chromosome) sys.stderr.write(\"\\tStart: %d\\n\" %", "1 num_bases = int(bases[(i + 1) : j]) skip_bases =", "point is already decided if there is going # to", "indel_length = most_common_insertion_length variant_line = ( \"ins\\tpos = %s\\tref =", "insertion/deletion count is ambiguous. pass return 0 def process_mpileup( alignments_path,", "open file \"%s\" for reading!\\n' % mpileup_path ) return None", "> 0: num_bases_to_skip -= 1 continue if use_bed == True:", "'line_number: %d' % line_number; # print line; # print '';", "= %s\\tnon_indel_cov_next = %d\\tnon_indel_cov_curr = %d\\tmost_common_insertion_count = %d\\tref_base = %s\\tcons_base", ") fp_vcf.write( '##INFO=<ID=HRUN,Number=1,Type=Integer,Description=\"Homopolymer length to the right of report indel", "the most common one. if len(list(deletion_event_counts.keys())) > 0: sorted_deletion_counts =", "sys.stderr.write( \"\\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\\n\" % sys.argv[0] )", "number of bases that need to be skipped in the", "i) + num_bases - 1 deletion_count += 1 deletion =", "mpileup_path = \"%s.mpileup\" % alignments_path_bam mpileup_exists = os.path.exists(mpileup_path) if mpileup_exists", "mpileup file if it doesn't exist yet. 
mpileup_path = \"%s.mpileup\"", "open file \"%s\" for writing!\\n' % (summary_file) ) return None", "= %s\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s\" % (", "qual, info, ) ret_vcf_list.append(vcf_line) ################## else: ret_num_called_bases[0] += 1 ret_coverage_sum[0]", "len(ret_vcf_list) > vcf_list_length and fp_vcf != None: fp_vcf.write(\"\\n\".join(ret_vcf_list[vcf_list_length:]) + \"\\n\")", "> non_indel_coverage_next_base ): # In this case, insertions are a", "try: fp_sum = open(summary_file, \"w\") fp_sum.write(summary_lines) fp_sum.close() return summary_file except", "qual = 1000 info = \"DP=%s;TYPE=del\" % (coverage) ref_field =", "elif alignments_path.endswith(\"bam\") == False: sys.stderr.write( 'ERROR: File extension needs to", "# Get the number of bases that are inserted; j", "current_pos >= bed_pos_end: i += 1 j += 1 continue", "# ret_insertion_count[0] += most_common_insertion_length; ret_insertion_count[0] += 1 ret_num_called_bases[0] += most_common_insertion_length", "file, then convert it to a sorted BAM. alignments_path_bam =", "see if there actually were any deletions (to avoid index", "bases\">\\n' ) fp_vcf.write( '##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the variant is an INDEL.\">\\n'", "5): # sys.stderr.write('Usage:\\n'); # sys.stderr.write('\\t%s <reference_file_path> coverage_threshold <collective_output_file> <{sb}am_file_1> [<{sb}am_file_2>", "ret_variant_list.append(line_number); variant_line = ( \"SNP\\tpos = %s\\tref = %s\\tcoverage =", "= 0 indel_length = most_common_insertion_length variant_line = ( \"ins\\tpos =", "sum( [ int(deletion_count[1] == most_common_deletion_count) for deletion_count in sorted_deletion_counts ]", "python # Copyright <NAME>, 2015. www.sovic.org # # Creates a", "'position: %s' % position; # print 'bases: \"%s\"' % bases;", "> most_common_deletion_count and most_common_insertion_count > non_indel_coverage_next_base ): # In this", "+ insertion_count ) > coverage_threshold: # Sanity check, just to", "sys.stderr.write( \"\\nWarning: a SNP was detected, but there were no", "= \"DP=%s;TYPE=del\" % (coverage) ref_field = \"%s%s\" % (ref_base, sorted_deletion_counts[-1][0])", "bases # that need to be skipped. i += skip_bases", "name already exists. if alignments_path_bam_exists == False or ( alignments_path_bam_exists", "skipped because of a deletion # consensus), then the deletions", "== r\"-\": # This marks the occurance of deletions. It", "of bounds error). 
# If there are insertions, get the", "+= 1 ret_coverage_sum[0] += int(coverage) # TODO: Should I count", "( i, ret_snp_count[0], ret_insertion_count[0], ret_deletion_count[0], ret_num_undercovered_bases[0], (float(ret_coverage_sum[0]) / float((i +", ") ) sys.stderr.write(\"\\n\") else: ret_num_correct_bases[0] += 1 if verbose ==", "allele (snp, ins, del, mnp, complex)\">\\n' ) fp_vcf.write( '##INFO=<ID=AF,Number=1,Type=Float,Description=\"Allele Frequency\">\\n'", "position, ref_field, alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ################## else: ret_num_called_bases[0]", "bed_chromosome; i += 1 j += 1 continue if thread_id", "False if ( most_common_insertion_count > most_common_deletion_count and most_common_insertion_count > non_indel_coverage_next_base", "else: most_common_deletion_count = 0 most_common_deletion_length = 0 deletion_unique = False", "fp_variant.flush() if len(ret_vcf_list) > vcf_list_length and fp_vcf != None: fp_vcf.write(\"\\n\".join(ret_vcf_list[vcf_list_length:])", "alignments_path, reference_path, (\"%s.mpileup\" % alignments_path_bam), coverage_threshold, output_prefix, thread_id, bed_position, )", "if current_pos < bed_pos_start or current_pos >= bed_pos_end: i +=", "Here we are interested even in the reads ### which", "def process_mpileup_line( line, line_number, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases,", "!= 2: use_bed = False else: bed_chromosome = bed_split[0] bed_pos_split", "line, and perform a sanity check. split_line = line.strip().split(\"\\t\") if", "sys.stderr.write(\"\\n\") else: ret_num_correct_bases[0] += 1 if verbose == True: sys.stdout.write(\"Reference", "output_prefix, thread_id, bed_position, ) def CollectSummaries( sam_files, prefix_for_intermediate_results, collective_output_file ):", "ref_base: is_good = True break if is_good == False: if", "reference_file = sys.argv[1] coverage_threshold = int(sys.argv[2]) output_prefix = sys.argv[3] sam_file", "don't know where it ends). non_indel_coverage_next_base = ( int(coverage) -", "> 10000): # break; fp.close() sys.stderr.write(\"\\n\") if fp_variant != None:", "= len(sorted_insertion_counts[-1][0]) insertion_unique = ( True if ( sum( [", "0) else (str(sorted_base_counts[-1][0])) ) qual = 1000 info = \"DP=%s;TYPE=snp\"", "one. if len(list(insertion_event_counts.keys())) > 0: sorted_insertion_counts = sorted( list(insertion_event_counts.items()), key=operator.itemgetter(1)", "j += 1 continue else: # print line_split[0]; # print", "of each allele (snp, ins, del, mnp, complex)\">\\n' ) fp_vcf.write(", "an allele aware count. # Get the number of bases", "file. command = \"samtools index %s %s.bai\" % ( alignments_path_bam,", "len(sorted_insertion_counts[-1][0]) insertion_unique = ( True if ( sum( [ int(insertion_count[1]", "= int(coverage) - current_base_deletion_count if verbose == True: sys.stdout.write(\"%s\\nbase_counts: %s\\n\"", "of the file, and the correctness of its extension. #", "one # through the '*' character. # Get the number", "== 1000000 or int(position) == 2000000 or int(position) == 3000000", "clear winner. 
if insertion_unique == True: # ret_insertion_count[0] += most_common_insertion_length;", "# Convert the sorted BAM file to a mpileup file", "else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ###", "a low frequency variant).\">\\n' ) fp_vcf.write( '##INFO=<ID=HRUN,Number=1,Type=Integer,Description=\"Homopolymer length to the", "alignments_path) return if alignments_path.endswith(\"sam\"): # Determine the path where the", "base_counts = {} insertion_count = 0 current_base_deletion_count = 0 deletion_count", "lines = fp_sum.readlines() fp_sum.close() except IOError: sys.stderr.write( 'ERROR: Could not", "bed_position != \"\": bed_split = bed_position.split(\":\") if len(bed_split) != 2:", "sorted_base_counts[-1][1] except Exception as e: pass # sys.stderr.write(str(e) + '\\n');", "= [] ret_snp_count = [0] ret_insertion_count = [0] ret_deletion_count =", "= %s\\tcoverage = %d\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\"", "os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path) ): command = \"samtools mpileup -B -d", "the variant is an INDEL.\">\\n' ) fp_vcf.write( '##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description=\"Indicates that the", "quality of the read. # increase_in_dict(base_counts, bases[i + 1].upper()); i", "len(original_bases): if original_bases[i] == \".\" or original_bases[i] == \",\": bases", "output_prefix != \"\": # summary_file = output_prefix + '.conssum'; summary_file", "= sorted_insertion_counts[-1][1] most_common_insertion_length = len(sorted_insertion_counts[-1][0]) insertion_unique = ( True if", "of the inserted bases # and the actual bases that", "current position). # Similar to the deletion marking, but here", "output_prefix == \"-\": output_prefix = os.path.splitext(sam_file)[0] main(sam_file, reference_file, coverage_threshold, output_prefix,", "while (i < bed_pos_end): # len(lines)): num_bases_to_skip = 0 for", "problematic case, discovered this on 03.11.2014., when analyzing BWA-MEM's mpileup.", "if __name__ == \"__main__\": # if (len(sys.argv) < 5): #", "the reads ### which had a '*' at the current", "of this base, or the non_indel_coverage_current_base? most_common_base_count = 0 ###", "vcf_list_length = len(ret_vcf_list) num_bases_to_skip = process_mpileup_line( line, i, ret_variant_list, ret_vcf_list,", "''; # sys.stdout.flush(); i = 0 while i < len(bases):", "is ambiguous. 
pass return 0 def process_mpileup( alignments_path, reference_path, mpileup_path,", "Depth\">\\n') fp_vcf.write( '##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of each allele (snp, ins, del, mnp,", "+= 1 elif base == r\"*\": # This is a", "= \"samtools view -bS %s | samtools sort - %s\"", "skip_bases = (j - i) + num_bases - 1 deletion_count", "= 0 deletion_count = 0 insertion_event_counts = {} deletion_event_counts =", "i = 0 while i < len(bases): base = bases[i]", "we actually care about the bases, # and we need", "None try: fp = open(mpileup_path, \"r\") except IOError: sys.stderr.write( 'ERROR:", "ret_coverage_sum[0] += 0; ret_coverage_sum[0] += int(coverage) # TODO: Should I", "ret_num_undercovered_bases[0], (float(ret_coverage_sum[0]) / float((i + 1))), ) ) sys.stderr.flush() variant_list_length", "ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=use_bed,", "pass # variant_line = 'undercovered1\\tpos = %s\\tcoverage = %d\\tnon_indel_cov_curr =", "calls consensus bases (or variants). import os import sys import", "for deletion_count in sorted_deletion_counts ] ) == 1 ) else", "bases follow the current position). # Similar to the deletion", "%s\\t%s\" % ( position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_deletion_count, ref_base, sorted_base_counts[-1][0],", "\"num_undercovered_bases: %d\\n\" % ret_num_undercovered_bases[0] summary_lines += \"num_called_bases: %d\\n\" % ret_num_called_bases[0]", "total coverage of the current base with the coverage threshold.", "starting position of a read. It encodes two # symbols:", "sys.stderr.write(str(base_counts) + '\\n'); # sys.stderr.write('original_bases:\\n'); # sys.stderr.write(str(original_bases) + '\\n'); #", "fp_collect.close() if __name__ == \"__main__\": # if (len(sys.argv) < 5):", "# Sanity checking the existence of the file, and the", "path: \"%s\".\\n' % alignments_path ) return # Convert the sorted", "################## else: ret_num_called_bases[0] += 1 ret_coverage_sum[0] += int(coverage) # TODO:", "be skipped. i += skip_bases elif base == r\"+\": #", "not open file \"%s\" for writing!\\n' % (summary_file) ) return", "%s' % position; # print 'bases: \"%s\"' % bases; #", "# TODO: An additional problematic case, discovered this on 03.11.2014.,", "'##INFO=<ID=AF,Number=1,Type=Float,Description=\"Allele Frequency\">\\n' ) fp_vcf.write( '##INFO=<ID=SB,Number=1,Type=Integer,Description=\"Phred-scaled strand bias at this position\">\\n'", "1 elif base == r\"*\": # This is a deletion,", "prior to occuring, and at that point is already decided", "summary_lines += \"num_called_bases: %d\\n\" % ret_num_called_bases[0] summary_lines += \"num_correct_bases: %d\\n\"", "split_line[2] coverage = split_line[3] original_bases = split_line[4] if len(split_line) ==", "yet. 
mpileup_path = \"%s.mpileup\" % alignments_path_bam mpileup_exists = os.path.exists(mpileup_path) if", "sys.stderr.write('\\t(If <collective_output_file> is equal to \"-\", no files will be", "sys.argv[5] # sys.stderr.write('bed_position: \"%s\"\\n\\n' % bed_position); processes = [] if", "{} deletion_event_counts = {} end_counts = 0 # print 'position:", "<reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\\n\" % sys.argv[0] ) sys.stderr.write( '\\t(If", "% (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); # return", "+= 1 # variant_line = 'deletion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts),", "if mpileup_exists == False or ( mpileup_exists == True and", "is a deletion, just count it. current_base_deletion_count += 1 elif", "# ret_coverage_sum[0] += 0; ret_coverage_sum[0] += int(coverage) # TODO: Should", "% collective_output_file ) return for sam_file in sam_files: summary_file =", "fp_vcf.write( '##INFO=<ID=HRUN,Number=1,Type=Integer,Description=\"Homopolymer length to the right of report indel position\">\\n'", "\"%s-cov_%d.variant.sum\" % (output_prefix, coverage_threshold) try: fp_sum = open(summary_file, \"w\") fp_sum.write(summary_lines)", "# exit(1); if len(sys.argv) < 5: sys.stderr.write(\"Usage:\\n\") sys.stderr.write( \"\\t%s <reference_file_path>", "Previously I compared the total coverage of the current base", "sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); ### Handling indel consensus. ###", "if verbose == True: sys.stdout.write(\"%s\\nbase_counts: %s\\n\" % (line.strip(), str(base_counts))) #", "( position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, (\"{}\") if (len(sorted_base_counts)", "base_count[0] == ref_base: is_good = True break if is_good ==", "int(coverage), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), ) ) ret_variant_list.append(variant_line) ### VCF output", "os.path.exists(alignments_path_bam) # Check if a BAM file with the given", "): # In this case, insertions are a clear winner.", "sanity check. split_line = line.strip().split(\"\\t\") if len(split_line) < 5 or", "see if there actually were any insertions (to avoid index", "\"%s\"\\n\\n' % bed_position); processes = [] if output_prefix == \"-\":", "sorted( list(insertion_event_counts.items()), key=operator.itemgetter(1) ) most_common_insertion_count = sorted_insertion_counts[-1][1] most_common_insertion_length = len(sorted_insertion_counts[-1][0])", "Sanity check, just to see if there actually were any", "\"w\") fp_vcf.write(\"##fileformat=VCFv4.0\\n\") fp_vcf.write(\"##fileDate=20150409\\n\") fp_vcf.write(\"##source=%s\\n\" % (\" \".join(sys.argv))) fp_vcf.write(\"##reference=%s\\n\" % reference_path)", "fp_vcf.write( '##INFO=<ID=SB,Number=1,Type=Integer,Description=\"Phred-scaled strand bias at this position\">\\n' ) fp_vcf.write( '##INFO=<ID=DP4,Number=4,Type=Integer,Description=\"Counts", "%d\\n\" % ret_num_called_bases[0] summary_lines += \"num_correct_bases: %d\\n\" % ret_num_correct_bases[0] summary_lines", "most common one. 
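# --- Illustrative sketch (not part of the original script) ---
# The samtools preprocessing chain used here, shown as one stand-alone helper.
# The command strings mirror the ones assembled in this file (note the older
# "samtools sort - <prefix>" syntax); the helper itself is hypothetical.
import os
import subprocess

def make_mpileup(sam_path, reference_path):
    prefix = os.path.splitext(sam_path)[0]
    bam_path = prefix + ".bam"
    mpileup_path = bam_path + ".mpileup"
    # SAM -> sorted BAM, then index, then pileup over the whole reference.
    subprocess.call("samtools view -bS %s | samtools sort - %s" % (sam_path, prefix), shell=True)
    subprocess.call("samtools index %s %s.bai" % (bam_path, bam_path), shell=True)
    subprocess.call(
        "samtools mpileup -B -d 1000000 -Q 0 -A -f %s %s > %s" % (reference_path, bam_path, mpileup_path),
        shell=True,
    )
    return mpileup_path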
if len(list(insertion_event_counts.keys())) > 0: sorted_insertion_counts = sorted(", "num_bases)].upper() increase_in_dict(insertion_event_counts, insertion) i += skip_bases else: increase_in_dict(base_counts, bases[i].upper()) i", "will be written to disk.)\\n' ) sys.stderr.write( '\\tPosition parameter is", "3000000 or int(position) == 4000000): # print '\\nTEST\\tpos = %s\\tcoverage", "try: dict_counter[value] += 1 except: dict_counter[value] = 1 def process_mpileup_line(", ") sys.stderr.write( '\\t(If <collective_output_file> is equal to \"-\", no files", "+= 1 # Increase only by 1, because we have", "reference_file, coverage_threshold, output_prefix, 0, bed_position) # if (output_prefix != '-'):", "%d' % line_number; # print line; # print ''; #", "bed_position=\"\", ): fp = None try: fp = open(mpileup_path, \"r\")", "gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D- # I chose to", "bed_pos_end: i += 1 j += 1 continue else: #", "str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ### Insertions in the VCF", "where a insertion occurs. The ref position should contain the", "= %s\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s\\n' % (position,", "bed_pos_split = bed_split[1].split(\"-\") if len(bed_pos_split) != 2: use_bed = False", "%d\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\" % ( position,", "file \"%s\" for reading!\\n' % mpileup_path ) return None ret_variant_list", "(to avoid index out of bounds error). # If there", "%s\" % ( position, ref_name, int(coverage), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), )", "% (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts),", "import operator import subprocess def increase_in_dict(dict_counter, value): try: dict_counter[value] +=", "% ret_num_undercovered_bases[0] summary_lines += \"num_called_bases: %d\\n\" % ret_num_called_bases[0] summary_lines +=", "-f %s %s > %s.mpileup\" % ( reference_path, alignments_path_bam, alignments_path_bam,", "in sorted_insertion_counts ] ) == 1 ) else False )", "else: ret_num_called_bases[0] += 1 ret_coverage_sum[0] += int(coverage) # TODO: Should", "= \"%s-cov_%d.variant.csv\" % (output_prefix, coverage_threshold) fp_variant = open(variant_file, \"w\") vcf_file", "% ( reference_path, alignments_path_bam, alignments_path_bam, ) subprocess.call(command, shell=\"True\") sys.stderr.write('Processing file", "if ( non_indel_coverage_next_base + deletion_count + insertion_count ) > coverage_threshold:", "== True: # ret_insertion_count[0] += most_common_insertion_length; ret_insertion_count[0] += 1 ret_num_called_bases[0]", "as e: pass # sys.stderr.write(str(e) + '\\n'); # sys.stderr.write('sorted_base_counts:\\n'); #", "= %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count = %d\\tref_base = %s\\tcons_base = %s\\tbase_counts", "return 0 def process_mpileup( alignments_path, reference_path, mpileup_path, coverage_threshold, output_prefix, thread_id=0,", "a deletion, just count it. current_base_deletion_count += 1 elif base", "% (ref_base)) sys.stdout.write(\"Consensus base: %s\\n\\n\" % (base_count[0])) # if (int(position)", "0 deletion_count = 0 insertion_event_counts = {} deletion_event_counts = {}", "specifies the position where a insertion occurs. 
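# --- Illustrative sketch (not part of the original script) ---
# Insertion and deletion consensus both follow the same pattern: sort the
# event counts, take the most frequent event, and only accept it if no other
# event has the same count (otherwise the call is ambiguous). Stand-alone:
import operator

def most_common_event(event_counts):
    # event_counts: dict mapping inserted/deleted sequence -> count.
    if not event_counts:
        return None, 0, False
    ranked = sorted(event_counts.items(), key=operator.itemgetter(1))
    seq, count = ranked[-1]
    is_unique = sum(1 for c in event_counts.values() if c == count) == 1
    return seq, count, is_unique
# Example: most_common_event({"A": 3, "AT": 3}) -> (..., 3, False)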
The ref position", "split_line[3] original_bases = split_line[4] if len(split_line) == 6: qualities =", "continue fp_collect.write(\"\".join(lines) + \"\\n\") fp_collect.close() if __name__ == \"__main__\": #", "> %s.mpileup\" % ( reference_path, alignments_path_bam, alignments_path_bam, ) subprocess.call(command, shell=\"True\")", "only by 1, because we have i += 1 down", "= \"\" summary_lines += \"alignments_file: %s\\n\" % alignments_path summary_lines +=", "sorted BAM file. command = \"samtools view -bS %s |", "1 ret_num_called_bases[0] += most_common_insertion_length # variant_line = 'insertion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_insertion_count,", "<collective_output_file> <{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\\n' % sys.argv[0]); # sys.stderr.write('\\t(If <collective_output_file>", "### Put a different coverage threshold. Here we are interested", "and perform a sanity check. split_line = line.strip().split(\"\\t\") if len(split_line)", "= 1000 info = \"DP=%s;TYPE=snp\" % (coverage) ref_field = ref_base", "'bases: \"%s\"' % bases; # print 'line_number: %d' % line_number;", "'-', the number of the deleted bases # and the", "<{sb}am_file_3> ...]\\n' % sys.argv[0]); # sys.stderr.write('\\t(If <collective_output_file> is equal to", "or the non_indel_coverage_current_base? most_common_base_count = 0 ### Handling base consensus.", "# This marks the occurance of deletions. It is a", "process_mpileup( alignments_path, reference_path, (\"%s.mpileup\" % alignments_path_bam), coverage_threshold, output_prefix, thread_id, bed_position,", "r\"*\": # This is a deletion, just count it. current_base_deletion_count", "line = lines[i]; if num_bases_to_skip > 0: num_bases_to_skip -= 1", "for base_count in sorted_base_counts: if base_count[1] == most_common_base_count: if base_count[0]", "with the first base being non-deletion, and the following bases", "skip_bases else: increase_in_dict(base_counts, bases[i].upper()) i += 1 # TODO: An", "(j % 1000) == 0: sys.stderr.write( \"\\r[%d] snps = %d,", "(or variants). import os import sys import operator import subprocess", "either .sam or .bam! Input file path: \"%s\".\\n' % alignments_path", "most_common_deletion_count = 0 most_common_deletion_length = 0 deletion_unique = False if", "skip_bases elif base == r\"+\": # This marks the occurance", "################## else: sys.stderr.write( \"\\nWarning: a SNP was detected, but there", "= most_common_insertion_length variant_line = ( \"ins\\tpos = %s\\tref = %s\\tnon_indel_cov_next", "ref_name, position, ref_field, alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ################## return", "int(position) == 2000000 or int(position) == 3000000 or int(position) ==", "original_bases = split_line[4] if len(split_line) == 6: qualities = split_line[5]", "i += 1 base_counts = {} insertion_count = 0 current_base_deletion_count", "a read. end_counts += 1 elif base == r\"*\": #", "deletions. 
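# --- Illustrative sketch (not part of the original script) ---
# VCF anchors indels on the base before the event: for an insertion, REF is
# the reference base and ALT is that base plus the inserted sequence; for a
# deletion, REF is the reference base plus the deleted sequence and ALT is
# the reference base alone. A tiny helper (hypothetical name):
def vcf_ref_alt(ref_base, event_seq, is_insertion):
    if is_insertion:
        return ref_base, ref_base + event_seq   # e.g. ("T", "TAG")
    return ref_base + event_seq, ref_base       # e.g. ("TAG", "T")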
It is a composite object # consisting of: the", "> most_common_insertion_count and most_common_deletion_count > non_indel_coverage_next_base ): # In this", "base count consensus wins, or the # insertion/deletion count is", "'\\n'); # sys.stderr.write('original_bases:\\n'); # sys.stderr.write(str(original_bases) + '\\n'); # sys.stderr.write('line:\\n'); #", "fp_variant.write(\"\\n\".join(ret_variant_list[variant_list_length:]) + \"\\n\") fp_variant.flush() if len(ret_vcf_list) > vcf_list_length and fp_vcf", "= -1 if bed_position != \"\": bed_split = bed_position.split(\":\") if", "key=operator.itemgetter(1) ) most_common_insertion_count = sorted_insertion_counts[-1][1] most_common_insertion_length = len(sorted_insertion_counts[-1][0]) insertion_unique =", "line.strip(), ) ) sys.stderr.write(\"\\n\") else: ret_num_correct_bases[0] += 1 if verbose", "bases # and the actual bases that are deleted (these", "most_common_base_count = sorted_base_counts[-1][1] except Exception as e: most_common_base_count = 0", "ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False, ): #", "= False # Sanity check, just to see if there", "most common one. if len(list(deletion_event_counts.keys())) > 0: sorted_deletion_counts = sorted(", "= fp_sum.readlines() fp_sum.close() except IOError: sys.stderr.write( 'ERROR: Could not open", "\"num_called_bases: %d\\n\" % ret_num_called_bases[0] summary_lines += \"num_correct_bases: %d\\n\" % ret_num_correct_bases[0]", "( alignments_path, os.path.splitext(alignments_path_bam)[0], ) sys.stderr.write(command + \"\\n\") subprocess.call(command, shell=\"True\") #", "fp_sum.readlines() fp_sum.close() except IOError: sys.stderr.write( 'ERROR: Could not open file", "lines[i]; if num_bases_to_skip > 0: num_bases_to_skip -= 1 continue if", "most_common_insertion_length # variant_line = 'insertion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts),", "else: sys.stderr.write( \"\\nWarning: a SNP was detected, but there were", "if there is going # to be a deletion event.", "+= 1 # if (i > 10000): # break; fp.close()", "original_bases[i] i += 1 base_counts = {} insertion_count = 0", "ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False, ):", "path where the new BAM file will be generated. dir_name", "end_counts += 1 elif base == r\"*\": # This is", "/ float((i + 1))), ) ) sys.stderr.flush() variant_list_length = len(ret_variant_list)", "\"\" # Replace the '.' and ',' signs with the", "consensus variant (as opposed to a low frequency variant).\">\\n' )", "( \"undercovered1\\tpos = %s\\tref = %s\\tcoverage = %d\\tbase_counts = %s\\tinsertion_counts", "% ret_insertion_count[0] summary_lines += \"deletion_count: %d\\n\" % ret_deletion_count[0] summary_lines +=", "to make an allele aware count. # Get the number", "sorted_base_counts!\" ) variant_line = ( \"SNP\\tpos = %s\\tref = %s\\tcoverage", "the deletions denoted with the '*' sign, which I think", "03.11.2014., when analyzing BWA-MEM's mpileup. 
# There are pileup bases", "there actually were any deletions (to avoid index out of", "- 10), 0) j = 0 # while (i <", "= %s\\tdeletion_counts = %s\\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base,", "# Increase only by 1, because we have i +=", "if not os.path.exists(os.path.dirname(output_prefix)): os.makedirs(os.path.dirname(output_prefix)) variant_file = \"%s-cov_%d.variant.csv\" % (output_prefix, coverage_threshold)", "deleted bases # and the actual bases that are deleted", "% alignments_path_bam mpileup_exists = os.path.exists(mpileup_path) if mpileup_exists == False or", "case where there are multiple equally good choices. # In", "a sorted BAM file. command = \"samtools view -bS %s", "(output_prefix, coverage_threshold) try: fp_sum = open(summary_file, \"w\") fp_sum.write(summary_lines) fp_sum.close() return", "> variant_list_length and fp_variant != None: fp_variant.write(\"\\n\".join(ret_variant_list[variant_list_length:]) + \"\\n\") fp_variant.flush()", "== True and os.path.getmtime(alignments_path) > os.path.getmtime(alignments_path_bam) ): # Convert the", "coverage_threshold) try: fp_sum = open(summary_file, \"w\") fp_sum.write(summary_lines) fp_sum.close() return summary_file", "BAM file. command = \"samtools view -bS %s | samtools", "+= 1 elif base == r\"-\": # This marks the", "== 0: if (j % 1000) == 0: sys.stderr.write( \"\\r[%d]", "fp_vcf.write( '##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of each allele (snp, ins, del, mnp, complex)\">\\n'", "position where a deletion occurs, with the first base being", "[0] ret_num_called_bases = [0] ret_num_correct_bases = [0] ret_coverage_sum = [0]", "occurs, with the first base being non-deletion, and the following", "and a char marking the # mapping quality of the", "1938202 T 20 ******************** 8,2*#-;)$B>2$1&D- # I chose to handle", "> 5: bed_position = sys.argv[5] # sys.stderr.write('bed_position: \"%s\"\\n\\n' % bed_position);", "the sorted BAM file to a mpileup file if it", "0 # bed_pos_end = len(lines); bed_pos_end = -1 if bed_position", "= 0 ### Handling base consensus. 
sorted_base_counts = sorted( list(base_counts.items()),", "+= \"average_coverage: %.2f\\n\" % ( (float(ret_coverage_sum[0]) / float((i + 1)))", "% ret_num_correct_bases[0] summary_lines += \"average_coverage: %.2f\\n\" % ( (float(ret_coverage_sum[0]) /", "a composite object # consisting of: the special character '-',", "= 0 # bed_pos_end = len(lines); bed_pos_end = -1 if", "for writing!\\n' % collective_output_file ) return for sam_file in sam_files:", "try: most_common_base_count = sorted_base_counts[-1][1] except Exception as e: most_common_base_count =", "# Determine the path where the new BAM file will", "just to see if there actually were any deletions (to", "bases[j : (j + num_bases)].upper() increase_in_dict(insertion_event_counts, insertion) i += skip_bases", "%.2f\" % ( i, ret_snp_count[0], ret_insertion_count[0], ret_deletion_count[0], ret_num_undercovered_bases[0], (float(ret_coverage_sum[0]) /", "'insertion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); try:", "int(coverage) < coverage_threshold: ret_num_undercovered_bases[0] += 1 # ret_coverage_sum[0] += 0;", "5 or len(split_line) > 6: sys.stderr.write(line + \"\\n\") return 0", "6: sys.stderr.write(line + \"\\n\") return 0 ref_name = split_line[0] position", "str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); ### Handling indel consensus. ### Put", "sys.stderr.write(\"\\tEnd: %d\\n\\n\" % bed_pos_end) # i = 0; i =", "sys.stderr.write( 'ERROR: File extension needs to be either .sam or", "return 0 ref_name = split_line[0] position = split_line[1] ref_base =", "- end_counts - deletion_count - insertion_count ) if ( non_indel_coverage_next_base", "ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=use_bed, ) if len(ret_variant_list) > variant_list_length and", "(j + num_bases)].upper() increase_in_dict(insertion_event_counts, insertion) i += skip_bases else: increase_in_dict(base_counts,", ") else: most_common_insertion_count = 0 most_common_insertion_length = 0 insertion_unique =", "% (ref_base, sorted_insertion_counts[-1][0]) vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position,", "choice which is equal to the reference. is_good = False", "a deletion occurs, with the first base being non-deletion, and", "bed_split = bed_position.split(\":\") if len(bed_split) != 2: use_bed = False", "sys.stderr.write(str(original_bases) + '\\n'); # sys.stderr.write('line:\\n'); # sys.stderr.write(line.strip() + '\\n'); #", "open(collective_output_file, \"w\") except IOError: sys.stderr.write( 'ERROR: Could not open file", "the following bases being a deletion event. ### VCF output", "os.path.splitext(sam_file)[0] main(sam_file, reference_file, coverage_threshold, output_prefix, 0, bed_position) # if (output_prefix", "command = \"samtools mpileup -B -d 1000000 -Q 0 -A", "alt_field = ref_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position,", "[\"G\", 0]] sorted_base_counts = sorted( list(base_counts.items()), key=operator.itemgetter(1) ) try: most_common_base_count", "for reading!\\n' % mpileup_path ) return None ret_variant_list = []", "the special character '+', the number of the inserted bases", "be generated. 
dir_name = os.path.dirname(alignments_path) if dir_name == \"\": dir_name", "subprocess def increase_in_dict(dict_counter, value): try: dict_counter[value] += 1 except: dict_counter[value]", "\"__main__\": # if (len(sys.argv) < 5): # sys.stderr.write('Usage:\\n'); # sys.stderr.write('\\t%s", "/usr/bin/env python # Copyright <NAME>, 2015. www.sovic.org # # Creates", "= 1 def process_mpileup_line( line, line_number, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count,", "-B -d 1000000 -Q 0 -A -f %s %s >", "file if it doesn't exist yet. mpileup_path = \"%s.mpileup\" %", "a deletion # consensus), then the deletions on this base", "(coverage) ref_field = \"%s%s\" % (ref_base, sorted_deletion_counts[-1][0]) alt_field = ref_base", "__name__ == \"__main__\": # if (len(sys.argv) < 5): # sys.stderr.write('Usage:\\n');", "= split_line[0] position = split_line[1] ref_base = split_line[2] coverage =", "+= 1 except: dict_counter[value] = 1 def process_mpileup_line( line, line_number,", "= ( \"undercovered1\\tpos = %s\\tref = %s\\tcoverage = %d\\tbase_counts =", "%s\\tref = %s\\tcoverage = %d\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts =", "= %s\\tdeletion_counts = %s\\t%s\" % ( position, ref_name, int(coverage), non_indel_coverage_current_base,", "bases in the sorted_base_counts!\" ) variant_line = ( \"SNP\\tpos =", "line, i, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases,", "sys.stderr.write(\"Usage:\\n\") sys.stderr.write( \"\\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\\n\" % sys.argv[0]", "sys.stdout.write(\"Consensus base: %s\\n\\n\" % (base_count[0])) # if (int(position) == 100000", "# print bed_chromosome; i += 1 j += 1 continue", "list(base_counts.items()), key=operator.itemgetter(1) ) try: most_common_base_count = sorted_base_counts[-1][1] except Exception as", "str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); # return most_common_deletion_length; variant_line = (", "current_pos < bed_pos_start or current_pos >= bed_pos_end: i += 1", "we count deletions one by one # through the '*'", "alignments_path_bam, ) subprocess.call(command, shell=\"True\") sys.stderr.write('Processing file \"%s\"...\\n' % alignments_path) sys.stderr.write('Reference", "= process_mpileup( alignments_path, reference_path, (\"%s.mpileup\" % alignments_path_bam), coverage_threshold, output_prefix, thread_id,", "= %s\\tref = %s\\tcoverage = %d\\tnon_indel_cov_curr = %d\\tmost_common_base_count = %d\\tref_base", "%s | samtools sort - %s\" % ( alignments_path, os.path.splitext(alignments_path_bam)[0],", "True and os.path.getmtime(alignments_path) > os.path.getmtime(alignments_path_bam) ): # Convert the SAM", "( sum( [ int(insertion_count[1] == most_common_insertion_count) for insertion_count in sorted_insertion_counts", "2: use_bed = False else: bed_pos_start = int(bed_pos_split[0]) bed_pos_end =", "= bed_position.split(\":\") if len(bed_split) != 2: use_bed = False else:", "= 0; i = 0 if (use_bed == False) else", "ref_name, position, ref_field, alt_field, qual, info, ) ret_vcf_list.append(vcf_line) ################## else:", "(int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count): # if (non_indel_coverage_current_base", "file \"%s\" for writing!\\n' % collective_output_file ) return for sam_file", "the same as ref, but the alt field contains the", "and 
most_common_deletion_count > non_indel_coverage_next_base ): # In this case, deletions", "mpileup_path ) return None ret_variant_list = [] ret_vcf_list = []", "open(variant_file, \"w\") vcf_file = \"%s-cov_%d.variant.vcf\" % (output_prefix, coverage_threshold) fp_vcf =", "\"\" if len(sys.argv) > 5: bed_position = sys.argv[5] # sys.stderr.write('bed_position:", "base == r\"+\": # This marks the occurance of an", "position should contain the base which is the same as", "= %d\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\" % (", "marks the occurance of deletions. It is a composite object", "<collective_output_file> is equal to \"-\", no files will be written", "sys.stdout.flush(); i = 0 while i < len(bases): base =", "base, or the non_indel_coverage_current_base? most_common_base_count = 0 ### Handling base", "+ 1))) ) sys.stderr.write(summary_lines + \"\\n\") sys.stderr.write(\"\\n\") if output_prefix !=", "most_common_insertion_count > most_common_deletion_count and most_common_insertion_count > non_indel_coverage_next_base ): # In", "subprocess.call(command, shell=\"True\") elif alignments_path.endswith(\"bam\") == False: sys.stderr.write( 'ERROR: File extension", "pass # sys.stderr.write(str(e) + '\\n'); # sys.stderr.write('sorted_base_counts:\\n'); # sys.stderr.write(str(sorted_base_counts) +", "bases[j] in \"0123456789\": j += 1 num_bases = int(bases[(i +", "# insertion/deletion count is ambiguous. pass return 0 def process_mpileup(", "+= most_common_insertion_length; ret_insertion_count[0] += 1 ret_num_called_bases[0] += most_common_insertion_length # variant_line", "inserted (these bases follow the current position). # Similar to", "= \"%s%s\" % (ref_base, sorted_deletion_counts[-1][0]) alt_field = ref_base vcf_line =", "bounds error). # If there are insertions, get the most", "dir_name = \".\" alignments_path_bam = ( dir_name + \"/\" +", "(j - i) + num_bases - 1 insertion_count += 1", "actually were any insertions (to avoid index out of bounds", "< 5: sys.stderr.write(\"Usage:\\n\") sys.stderr.write( \"\\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\\n\"", "%s\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s' % (position, int(coverage),", "coverage_threshold summary_lines += \"snp_count: %d\\n\" % ret_snp_count[0] summary_lines += \"insertion_count:", "need to make an allele aware count. # Get the", "this base, or the non_indel_coverage_current_base? sorted_base_counts = [[\"A\", 0], [\"C\",", "ret_vcf_list.append(vcf_line) ################## elif ( most_common_deletion_count > most_common_insertion_count and most_common_deletion_count >", "bed_chromosome) sys.stderr.write(\"\\tStart: %d\\n\" % bed_pos_start) sys.stderr.write(\"\\tEnd: %d\\n\\n\" % bed_pos_end) #", "sys.stderr.write('line:\\n'); # sys.stderr.write(line.strip() + '\\n'); # most_common_base_count = 0; #", "num_bases - 1 deletion_count += 1 deletion = bases[j :", "% coverage_threshold) summary_file = process_mpileup( alignments_path, reference_path, (\"%s.mpileup\" % alignments_path_bam),", "insertion_count += 1 insertion = bases[j : (j + num_bases)].upper()", "\"%s\"...\\n' % reference_path) sys.stderr.write(\"Coverage threshold: %d\\n\" % coverage_threshold) summary_file =", "signs with the actual reference base. 
i = 0 while", "0: ret_snp_count[0] += 1 # ret_variant_list.append(line_number); variant_line = ( \"SNP\\tpos", "= True break if is_good == False: if len(sorted_base_counts) >", "sys.stderr.write(\"Coverage threshold: %d\\n\" % coverage_threshold) summary_file = process_mpileup( alignments_path, reference_path,", "+ the insertion event. ### VCF output ### alt_base =", "case, either the base count consensus wins, or the #", "being non-deletion, and the following bases being a deletion event.", "1))), ) ) sys.stderr.flush() variant_list_length = len(ret_variant_list) vcf_list_length = len(ret_vcf_list)", "special character '+', the number of the inserted bases #", "sorted_base_counts[-1][0] except: temp_sorted_bc = 0 indel_length = most_common_insertion_length variant_line =", "either the base count consensus wins, or the # insertion/deletion", "occurs. The ref position should contain the base which is", "%s\\tcoverage = %d\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\" %", "'\\t(If <collective_output_file> is equal to \"-\", no files will be", "+ 1].upper()); i += 1 # Increase only by 1,", "base == r\"$\": # This marks the end of a", "sys.stderr.write(\"\\n\") if fp_variant != None: fp_variant.close() if fp_vcf != None:", "################## elif ( most_common_deletion_count > most_common_insertion_count and most_common_deletion_count > non_indel_coverage_next_base", "ret_num_correct_bases[0] += 1 if verbose == True: sys.stdout.write(\"Reference base: %s\\n\"", "where the new BAM file will be generated. dir_name =", "%s\\t%s\" % ( position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, (\"{}\")", "alignments_path.endswith(\"sam\"): # Determine the path where the new BAM file", "file \"%s\" for writing!\\n' % (summary_file) ) return None return", "print line; # print ''; # sys.stdout.flush(); i = 0", "# In this case, either the base count consensus wins,", "= False if ( most_common_insertion_count > most_common_deletion_count and most_common_insertion_count >", "approach, we ignore this case, because we count deletions one", "we are interested even in the reads ### which had", "alignments_path_bam mpileup_exists = os.path.exists(mpileup_path) if mpileup_exists == False or (", "increase_in_dict(insertion_event_counts, insertion) i += skip_bases else: increase_in_dict(base_counts, bases[i].upper()) i +=", "# sys.stderr.write('base_counts:\\n'); # sys.stderr.write(str(base_counts) + '\\n'); # sys.stderr.write('original_bases:\\n'); # sys.stderr.write(str(original_bases)", "most_common_deletion_length = len(sorted_deletion_counts[-1][0]) deletion_unique = ( True if ( sum(", "ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=use_bed, ) if len(ret_variant_list) > variant_list_length", "# Check if a BAM file with the given name", "# sys.stderr.write(line.strip() + '\\n'); # most_common_base_count = 0; # Allow", "reads ### which had a '*' at the current position", "ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=use_bed, )", "most_common_insertion_length variant_line = ( \"ins\\tpos = %s\\tref = %s\\tnon_indel_cov_next =", "length of the numeric entry plus the actual number of", "not exist!\\n' % alignments_path) return if alignments_path.endswith(\"sam\"): # Determine the", "the actual bases that are inserted (these bases follow the", "Could not open file \"%s\" for 
writing!\\n' % (summary_file) )", "is_good == False: if len(sorted_base_counts) > 0: ret_snp_count[0] += 1", "non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_insertion_count, ref_base, temp_sorted_bc, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), )", "read. It encodes two # symbols: '^' marking the read", "% ( (float(ret_coverage_sum[0]) / float((i + 1))) ) sys.stderr.write(summary_lines +", "\"r\") lines = fp_sum.readlines() fp_sum.close() except IOError: sys.stderr.write( 'ERROR: Could", "return most_common_deletion_length else: # In this case, either the base", "= open(variant_file, \"w\") vcf_file = \"%s-cov_%d.variant.vcf\" % (output_prefix, coverage_threshold) fp_vcf", "ref_base, (\"{}\") if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts),", "base == r\"*\": # This is a deletion, just count", "coverage_threshold <collective_output_file> <{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\\n' % sys.argv[0]); # sys.stderr.write('\\t(If", ") qual = 1000 info = \"DP=%s;TYPE=snp\" % (coverage) ref_field", "1 insertion_count += 1 insertion = bases[j : (j +", "each allele (snp, ins, del, mnp, complex)\">\\n' ) fp_vcf.write( '##INFO=<ID=AF,Number=1,Type=Float,Description=\"Allele", "'\\n'); # sys.stderr.write('line:\\n'); # sys.stderr.write(line.strip() + '\\n'); # most_common_base_count =", "this position\">\\n' ) fp_vcf.write( '##INFO=<ID=DP4,Number=4,Type=Integer,Description=\"Counts for ref-forward bases, ref-reverse, alt-forward", "and the correctness of its extension. # Also, if input", "coverage of this base, or the non_indel_coverage_current_base? sorted_base_counts = [[\"A\",", "reference base. i = 0 while i < len(original_bases): if", "the deleted bases # and the actual bases that are", "not open file \"%s\" for reading!\\n' % summary_file ) continue", "= int(bed_pos_split[1]) use_bed = True sys.stderr.write(\"Using location specified through commandline:\\n\")", "to see if there actually were any insertions (to avoid", "% alignments_path summary_lines += \"mpileup_file: %s\\n\" % mpileup_path summary_lines +=", "and most_common_insertion_count > non_indel_coverage_next_base ): # In this case, insertions", "= %s\\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts),", "] ) == 1 ) else False ) else: most_common_deletion_count", "and the following bases being a deletion event. ### VCF", "%s\\tdeletion_counts = %s\\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0],", "use_bed = False bed_chromosome = \"\" bed_pos_start = 0 #", "the current position). 
# In our approach, we ignore this", "import sys import operator import subprocess def increase_in_dict(dict_counter, value): try:", "str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); variant_line = ( \"undercovered1\\tpos = %s\\tref", "% mpileup_path ) return None ret_variant_list = [] ret_vcf_list =", "ret_num_correct_bases = [0] ret_coverage_sum = [0] # lines = fp.readlines();", "return for sam_file in sam_files: summary_file = prefix_for_intermediate_results + \".sum\"", "most_common_deletion_length = 0 deletion_unique = False if ( most_common_insertion_count >", "= bed_split[0] bed_pos_split = bed_split[1].split(\"-\") if len(bed_pos_split) != 2: use_bed", "= 0 insertion_event_counts = {} deletion_event_counts = {} end_counts =", "1 # ret_coverage_sum[0] += 0; ret_coverage_sum[0] += int(coverage) # TODO:", "if len(list(insertion_event_counts.keys())) > 0: sorted_insertion_counts = sorted( list(insertion_event_counts.items()), key=operator.itemgetter(1) )", "increase_in_dict(base_counts, bases[i].upper()) i += 1 # TODO: An additional problematic", "0, bed_position) # if (output_prefix != '-'): # CollectSummaries([sam_file], output_prefix,", "VCF output ### qual = 1000 info = \"DP=%s;TYPE=snp\" %", "reference_path, alignments_path_bam, alignments_path_bam, ) subprocess.call(command, shell=\"True\") sys.stderr.write('Processing file \"%s\"...\\n' %", "line.strip().split(\"\\t\") if len(line_split) > 2 and line_split[0] == bed_chromosome: current_pos", "the reference. is_good = False for base_count in sorted_base_counts: if", "= None try: fp = open(mpileup_path, \"r\") except IOError: sys.stderr.write(", "1 j += 1 continue if thread_id == 0: if", "ret_snp_count[0] summary_lines += \"insertion_count: %d\\n\" % ret_insertion_count[0] summary_lines += \"deletion_count:", "ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); variant_line =", "are ignored. # if (int(coverage) < coverage_threshold or int(coverage) ==", "(\"{}\") if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])) ) qual =", "of bases that are inserted; j = i + 1", "alt_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position, ref_field, alt_field,", "sorted_deletion_counts = sorted( list(deletion_event_counts.items()), key=operator.itemgetter(1) ) most_common_deletion_count = sorted_deletion_counts[-1][1] most_common_deletion_length", "alignments_path_bam, alignments_path_bam, ) subprocess.call(command, shell=\"True\") elif alignments_path.endswith(\"bam\") == False: sys.stderr.write(", ") ) ret_variant_list.append(variant_line) ### VCF output ### qual = 1000", "ret_variant_list.append(variant_line) ### Insertions in the VCF format specifies the position", "If there are insertions, get the most common one. if", "(float(ret_coverage_sum[0]) / float((i + 1))) ) sys.stderr.write(summary_lines + \"\\n\") sys.stderr.write(\"\\n\")", "/ float((i + 1))) ) sys.stderr.write(summary_lines + \"\\n\") sys.stderr.write(\"\\n\") if", "BAM file will be generated. dir_name = os.path.dirname(alignments_path) if dir_name", "or the non_indel_coverage_current_base? 
sorted_base_counts = [[\"A\", 0], [\"C\", 0], [\"T\",", "len(split_line) > 6: sys.stderr.write(line + \"\\n\") return 0 ref_name =", "1 insertion = bases[j : (j + num_bases)].upper() increase_in_dict(insertion_event_counts, insertion)", "= os.path.exists(mpileup_path) if mpileup_exists == False or ( mpileup_exists ==", "+ num_bases)].upper() increase_in_dict(deletion_event_counts, deletion) # Skip the length of the", "# gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D- # I chose", "= \"DP=%s;TYPE=snp\" % (coverage) ref_field = ref_base alt_field = \"N\"", "info, ) ret_vcf_list.append(vcf_line) ################## elif ( most_common_deletion_count > most_common_insertion_count and", "= \"%s-cov_%d.variant.sum\" % (output_prefix, coverage_threshold) try: fp_sum = open(summary_file, \"w\")", "temp_sorted_bc = 0 indel_length = most_common_insertion_length variant_line = ( \"ins\\tpos", "we have i += 1 down there. elif base ==", "\"w\") vcf_file = \"%s-cov_%d.variant.vcf\" % (output_prefix, coverage_threshold) fp_vcf = open(vcf_file,", "deletion marking, but here we actually care about the bases,", "fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw Depth\">\\n') fp_vcf.write( '##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of each allele (snp, ins, del,", "\"-\", no files will be written to disk.)\\n' ) sys.stderr.write(", "alignments_path, os.path.splitext(alignments_path_bam)[0], ) sys.stderr.write(command + \"\\n\") subprocess.call(command, shell=\"True\") # Create", "files will be written to disk.)\\n' ) sys.stderr.write( '\\tPosition parameter", "be handled properly? # Example line from the mpileup file:", "variant_line = 'insertion\\t%d\\t%s\\t%s\\t%s\\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); #", "most_common_deletion_length; variant_line = ( \"del\\tpos = %s\\tref = %s\\tnon_indel_cov_next =", "elif base == r\"-\": # This marks the occurance of", "ret_deletion_count[0] += most_common_deletion_length; ret_deletion_count[0] += 1 # variant_line = 'deletion\\t%d\\t%s\\t%s\\t%s\\t%s'", "compared the total coverage of the current base with the", "position = split_line[1] ref_base = split_line[2] coverage = split_line[3] original_bases", "= \"%s-cov_%d.variant.vcf\" % (output_prefix, coverage_threshold) fp_vcf = open(vcf_file, \"w\") fp_vcf.write(\"##fileformat=VCFv4.0\\n\")", "the actual bases that are deleted (these bases follow the", "= len(sorted_deletion_counts[-1][0]) deletion_unique = ( True if ( sum( [", "\"%s\"...\\n' % alignments_path) sys.stderr.write('Reference file \"%s\"...\\n' % reference_path) sys.stderr.write(\"Coverage threshold:", "== bed_chromosome: current_pos = int(line_split[1]) if current_pos < bed_pos_start or", "== \"\": dir_name = \".\" alignments_path_bam = ( dir_name +", "the position where a deletion occurs, with the first base", "to a low frequency variant).\">\\n' ) fp_vcf.write( '##INFO=<ID=HRUN,Number=1,Type=Integer,Description=\"Homopolymer length to", "+= 1 ret_num_called_bases[0] += most_common_insertion_length # variant_line = 'insertion\\t%d\\t%s\\t%s\\t%s\\t%s' %", "******************** 8,2*#-;)$B>2$1&D- # I chose to handle them as undercovered", "+ \"\\n\") sys.stderr.write(\"\\n\") if output_prefix != \"\": # summary_file =", "position; # print 'bases: \"%s\"' % bases; # print 'line_number:", "(str(sorted_base_counts[-1][0])) ) qual = 1000 info = 
\"DP=%s;TYPE=del\" % (coverage)", "= %s\\tdeletion_counts = %s\\t%s\\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base,", "deletion_unique = False if ( most_common_insertion_count > most_common_deletion_count and most_common_insertion_count", "deletion_unique = ( True if ( sum( [ int(deletion_count[1] ==", "If we wound up at this base (i.e. this base", "+ 1))), ) ) sys.stderr.flush() variant_list_length = len(ret_variant_list) vcf_list_length =", "counted prior to occuring, and at that point is already", "Check if a BAM file with the given name already", "except Exception as e: most_common_base_count = 0 pass # variant_line", "-A -f %s %s > %s.mpileup\" % ( reference_path, alignments_path_bam,", "): fp = None try: fp = open(mpileup_path, \"r\") except", "writing!\\n' % (summary_file) ) return None return None def main(", "most_common_insertion_count = sorted_insertion_counts[-1][1] most_common_insertion_length = len(sorted_insertion_counts[-1][0]) insertion_unique = ( True", "character. # Get the number of bases that need to", "current base with the coverage threshold. # However, the total", "= 0 for line in fp: # line = lines[i];", "\"\\n\") fp_vcf.flush() i += num_bases_to_skip i += 1 j +=", "( (float(ret_coverage_sum[0]) / float((i + 1))) ) sys.stderr.write(summary_lines + \"\\n\")", "sys.stdout.write(\"%s\\nbase_counts: %s\\n\" % (line.strip(), str(base_counts))) # EDIT: Previously I compared", "else: most_common_insertion_count = 0 most_common_insertion_length = 0 insertion_unique = False", "% ret_snp_count[0] summary_lines += \"insertion_count: %d\\n\" % ret_insertion_count[0] summary_lines +=", "Create the BAM index file. command = \"samtools index %s", "an INDEL.\">\\n' ) fp_vcf.write( '##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description=\"Indicates that the variant is a", "os.path.exists(os.path.dirname(output_prefix)): os.makedirs(os.path.dirname(output_prefix)) variant_file = \"%s-cov_%d.variant.csv\" % (output_prefix, coverage_threshold) fp_variant =", "end of a read. end_counts += 1 elif base ==", "if (i > 10000): # break; fp.close() sys.stderr.write(\"\\n\") if fp_variant", "sorted_base_counts = [[\"A\", 0], [\"C\", 0], [\"T\", 0], [\"G\", 0]]", "== False: if len(sorted_base_counts) > 0: ret_snp_count[0] += 1 #", "bed_position, ) def CollectSummaries( sam_files, prefix_for_intermediate_results, collective_output_file ): fp_collect =", "= 0 current_base_deletion_count = 0 deletion_count = 0 insertion_event_counts =", "# consensus), then the deletions on this base are ignored.", "reference. is_good = False for base_count in sorted_base_counts: if base_count[1]", "= False bed_chromosome = \"\" bed_pos_start = 0 # bed_pos_end", "dict_counter[value] = 1 def process_mpileup_line( line, line_number, ret_variant_list, ret_vcf_list, ret_snp_count,", "alignments_path, reference_path, coverage_threshold, output_prefix, thread_id=0, bed_position=\"\", ): # Sanity checking", "### Handling indel consensus. ### Put a different coverage threshold.", "== \"__main__\": # if (len(sys.argv) < 5): # sys.stderr.write('Usage:\\n'); #", "0 for line in fp: # line = lines[i]; if", "= split_line[4] if len(split_line) == 6: qualities = split_line[5] bases", "bases[j : (j + num_bases)].upper() increase_in_dict(deletion_event_counts, deletion) # Skip the", "are multiple equally good choices. # In this case, we", "analyzing BWA-MEM's mpileup. 
# There are pileup bases that do", "# isn't relevant, as deletions are counted prior to occuring,", "# If there are deletions, get the most common one.", "= %s\\tdeletion_counts = %s\\t%s\" % ( position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base,", "or int(coverage) == current_base_deletion_count): # if (non_indel_coverage_current_base < coverage_threshold): if", "j += 1 continue if thread_id == 0: if (j", "the most common one. if len(list(insertion_event_counts.keys())) > 0: sorted_insertion_counts =", "even in the reads ### which had a '*' at", "deletion = bases[j : (j + num_bases)].upper() increase_in_dict(deletion_event_counts, deletion) #", "plus the actual number of bases # that need to", "to handle them as undercovered bases. non_indel_coverage_current_base = int(coverage) -", "from the mpileup file: # gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ********************", "if there actually were any deletions (to avoid index out", "coverage_threshold, output_prefix, thread_id, bed_position, ) def CollectSummaries( sam_files, prefix_for_intermediate_results, collective_output_file", "ret_variant_list.append(variant_line) ### VCF output ### alt_base = ( (\"{}\") if", "thread_id, bed_position, ) def CollectSummaries( sam_files, prefix_for_intermediate_results, collective_output_file ): fp_collect", "a sorted BAM. alignments_path_bam = alignments_path if os.path.exists(alignments_path) == False:", "0; i = 0 if (use_bed == False) else max((bed_pos_start", "+ \"\\n\") subprocess.call(command, shell=\"True\") # Create the BAM index file.", "== False: sys.stderr.write( 'ERROR: File extension needs to be either", ") if ( non_indel_coverage_next_base + deletion_count + insertion_count ) >", "This marks the end of a read. end_counts += 1", "r\"-\": # This marks the occurance of deletions. It is", "the existence of the file, and the correctness of its", "if int(coverage) < coverage_threshold: ret_num_undercovered_bases[0] += 1 # ret_coverage_sum[0] +=", "\"\\n\") subprocess.call(command, shell=\"True\") # Create the BAM index file. command", "sam_file in sam_files: summary_file = prefix_for_intermediate_results + \".sum\" try: fp_sum", "i += 1 j += 1 continue else: # print", "str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ### VCF output ###", "will be written to disk.)\\n'); # exit(1); if len(sys.argv) <", "break if is_good == False: if len(sorted_base_counts) > 0: ret_snp_count[0]", "% bed_pos_end) # i = 0; i = 0 if", "or .bam! Input file path: \"%s\".\\n' % alignments_path ) return", "a given SAM/BAM file, and calls consensus bases (or variants).", "'*' sign, which I think # isn't relevant, as deletions", "or current_pos >= bed_pos_end: i += 1 j += 1", "operator import subprocess def increase_in_dict(dict_counter, value): try: dict_counter[value] += 1", "# ret_variant_list.append(variant_line); # return most_common_deletion_length; variant_line = ( \"del\\tpos =", "coverage of this base, or the non_indel_coverage_current_base? most_common_base_count = 0", "shell=\"True\") sys.stderr.write('Processing file \"%s\"...\\n' % alignments_path) sys.stderr.write('Reference file \"%s\"...\\n' %", "choices. # In this case, we prefer the choice which", "are a clear winner. 
if deletion_unique == True: # ret_deletion_count[0]", "%s\\tcons_base = %s\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s\" %", "False for base_count in sorted_base_counts: if base_count[1] == most_common_base_count: if", "= %s\\t%s\" % ( position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_deletion_count, ref_base,", "bed_pos_start) sys.stderr.write(\"\\tEnd: %d\\n\\n\" % bed_pos_end) # i = 0; i", "bases[i + 1].upper()); i += 1 # Increase only by", "sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); variant_line = (", "fp_vcf.write(\"##source=%s\\n\" % (\" \".join(sys.argv))) fp_vcf.write(\"##reference=%s\\n\" % reference_path) fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw Depth\">\\n') fp_vcf.write(", "if base == r\"^\": # This is the starting position", "equal to the reference. is_good = False for base_count in", "\"DP=%s;TYPE=del\" % (coverage) ref_field = \"%s%s\" % (ref_base, sorted_deletion_counts[-1][0]) alt_field", "have any actual bases, but only the '*' symbols. How", "# sys.stderr.write('bed_position: \"%s\"\\n\\n' % bed_position); processes = [] if output_prefix", "with the actual reference base. i = 0 while i", "need to be skipped. i += skip_bases elif base ==", "# increase_in_dict(base_counts, bases[i + 1].upper()); i += 1 # Increase", "int(line_split[1]) if current_pos < bed_pos_start or current_pos >= bed_pos_end: i", "if len(sys.argv) < 5: sys.stderr.write(\"Usage:\\n\") sys.stderr.write( \"\\t%s <reference_file_path> coverage_threshold <output_prefix>", "e: most_common_base_count = 0 pass # variant_line = 'undercovered1\\tpos =", "\"%s%s\" % (ref_base, sorted_deletion_counts[-1][0]) alt_field = ref_base vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\"", "thread_id == 0: if (j % 1000) == 0: sys.stderr.write(", "> vcf_list_length and fp_vcf != None: fp_vcf.write(\"\\n\".join(ret_vcf_list[vcf_list_length:]) + \"\\n\") fp_vcf.flush()", "\"\\n\") sys.stderr.write(\"\\n\") if output_prefix != \"\": # summary_file = output_prefix", "Exception as e: most_common_base_count = 0 pass # variant_line =", "(len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])) ) qual = 1000 info", "= len(ret_variant_list) vcf_list_length = len(ret_vcf_list) num_bases_to_skip = process_mpileup_line( line, i,", "\"N\" vcf_line = \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position, ref_field, alt_field,", "were any insertions (to avoid index out of bounds error).", "str(deletion_event_counts), line.strip(), ) ) ret_variant_list.append(variant_line) ### VCF output ### alt_base", "deletions one by one # through the '*' character. #", "bed_position) # if (output_prefix != '-'): # CollectSummaries([sam_file], output_prefix, output_prefix", "insertion_unique = ( True if ( sum( [ int(insertion_count[1] ==", "file \"%s\" for reading!\\n' % summary_file ) continue fp_collect.write(\"\".join(lines) +", "from a given SAM/BAM file, and calls consensus bases (or", "= %s\\tdeletion_counts = %s\" % ( position, ref_name, int(coverage), str(sorted_base_counts),", "return # Convert the sorted BAM file to a mpileup", "with the coverage threshold. # However, the total coverage also", "increase_in_dict(dict_counter, value): try: dict_counter[value] += 1 except: dict_counter[value] = 1", "the actual reference base. 
i = 0 while i <", "base didn't get skipped because of a deletion # consensus),", "# most_common_base_count = 0; # Allow for the case where", "while i < len(bases): base = bases[i] if base ==", "insertion = bases[j : (j + num_bases)].upper() increase_in_dict(insertion_event_counts, insertion) i", "= ( True if ( sum( [ int(insertion_count[1] == most_common_insertion_count)", "original_bases[i] == \".\" or original_bases[i] == \",\": bases += ref_base", "line.strip()); # ret_variant_list.append(variant_line); try: temp_sorted_bc = sorted_base_counts[-1][0] except: temp_sorted_bc =", "or int(position) == 1000000 or int(position) == 2000000 or int(position)", "if use_bed == True: line_split = line.strip().split(\"\\t\") if len(line_split) >", "sys.argv[4] bed_position = \"\" if len(sys.argv) > 5: bed_position =", "if len(split_line) == 6: qualities = split_line[5] bases = \"\"", "position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_deletion_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts),", "= %d, deletions = %d, undercovered = %d, coverage =", "output_prefix + '.conssum'; summary_file = \"%s-cov_%d.variant.sum\" % (output_prefix, coverage_threshold) try:", "most_common_deletion_count = sorted_deletion_counts[-1][1] most_common_deletion_length = len(sorted_deletion_counts[-1][0]) deletion_unique = ( True", "non_indel_coverage_next_base ): # In this case, insertions are a clear", "0: if (j % 1000) == 0: sys.stderr.write( \"\\r[%d] snps", "%s.mpileup\" % ( reference_path, alignments_path_bam, alignments_path_bam, ) subprocess.call(command, shell=\"True\") sys.stderr.write('Processing", "sys.stderr.write( 'ERROR: Could not open file \"%s\" for writing!\\n' %", "open(summary_file, \"r\") lines = fp_sum.readlines() fp_sum.close() except IOError: sys.stderr.write( 'ERROR:", "0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip(), ) ) sys.stderr.write(\"\\n\")", "%s\\tdeletion_counts = %s\\t%s\\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0],", "or the # insertion/deletion count is ambiguous. pass return 0", "if bed_position != \"\": bed_split = bed_position.split(\":\") if len(bed_split) !=", "if (j % 1000) == 0: sys.stderr.write( \"\\r[%d] snps =", "of the numeric entry plus the actual number of bases", "'##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the variant is an INDEL.\">\\n' ) fp_vcf.write( '##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description=\"Indicates", "writing!\\n' % collective_output_file ) return for sam_file in sam_files: summary_file", "if len(ret_vcf_list) > vcf_list_length and fp_vcf != None: fp_vcf.write(\"\\n\".join(ret_vcf_list[vcf_list_length:]) +", "1000) == 0: sys.stderr.write( \"\\r[%d] snps = %d, insertions =", "bed_pos_end) # i = 0; i = 0 if (use_bed", "open(summary_file, \"w\") fp_sum.write(summary_lines) fp_sum.close() return summary_file except IOError: sys.stderr.write( 'ERROR:", "\"w\") except IOError: sys.stderr.write( 'ERROR: Could not open file \"%s\"", "subprocess.call(command, shell=\"True\") # Create the BAM index file. 
command =", "I think # isn't relevant, as deletions are counted prior", "EDIT: Previously I compared the total coverage of the current", "( position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_deletion_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts),", "%s\\tcons_base = %s\\tbase_counts = %s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s' %", "if it doesn't exist yet. mpileup_path = \"%s.mpileup\" % alignments_path_bam", "sam_files: summary_file = prefix_for_intermediate_results + \".sum\" try: fp_sum = open(summary_file,", "report indel position\">\\n' ) fp_vcf.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n\") fp_vcf.flush() use_bed = False bed_chromosome", "= %d\\tmost_common_insertion_count = %d\\tref_base = %s\\tcons_base = %s\\tbase_counts = %s\\tinsertion_counts", "'ERROR: Could not open file \"%s\" for writing!\\n' % collective_output_file", "print 'position: %s' % position; # print 'bases: \"%s\"' %", "%s\\tdeletion_counts = %s\\t%s\" % ( position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_insertion_count,", "count. # Get the number of bases that are inserted;", "while i < len(original_bases): if original_bases[i] == \".\" or original_bases[i]", "len(list(insertion_event_counts.keys())) > 0: sorted_insertion_counts = sorted( list(insertion_event_counts.items()), key=operator.itemgetter(1) ) most_common_insertion_count", "bed_position.split(\":\") if len(bed_split) != 2: use_bed = False else: bed_chromosome", "True: line_split = line.strip().split(\"\\t\") if len(line_split) > 2 and line_split[0]", "# TODO: Should I count total coverage of this base,", "\"-\", no files will be written to disk.)\\n'); # exit(1);", "bed_pos_start or current_pos >= bed_pos_end: i += 1 j +=", "- i) + num_bases - 1 deletion_count += 1 deletion", "else: # print line_split[0]; # print bed_chromosome; i += 1", "are pileup bases that do not have any actual bases,", "( True if ( sum( [ int(insertion_count[1] == most_common_insertion_count) for", "sys import operator import subprocess def increase_in_dict(dict_counter, value): try: dict_counter[value]", "the occurance of deletions. It is a composite object #", "the non_indel_coverage_current_base? most_common_base_count = 0 ### Handling base consensus. sorted_base_counts", "'\\n'); # sys.stderr.write('sorted_base_counts:\\n'); # sys.stderr.write(str(sorted_base_counts) + '\\n'); # sys.stderr.write('base_counts:\\n'); #", "going # to be a deletion event. If we wound", "current position (because we don't know where it ends). non_indel_coverage_next_base", "BAM index file. command = \"samtools index %s %s.bai\" %", "== True: sys.stdout.write(\"Reference base: %s\\n\" % (ref_base)) sys.stdout.write(\"Consensus base: %s\\n\\n\"", "str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); try: temp_sorted_bc = sorted_base_counts[-1][0] except: temp_sorted_bc", "= None if output_prefix != \"\": if not os.path.exists(os.path.dirname(output_prefix)): os.makedirs(os.path.dirname(output_prefix))", "the base which is the same as ref, but the", "check. split_line = line.strip().split(\"\\t\") if len(split_line) < 5 or len(split_line)", "case, deletions are a clear winner. 
if deletion_unique == True:", "(output_prefix, coverage_threshold) fp_vcf = open(vcf_file, \"w\") fp_vcf.write(\"##fileformat=VCFv4.0\\n\") fp_vcf.write(\"##fileDate=20150409\\n\") fp_vcf.write(\"##source=%s\\n\" %", ") variant_line = ( \"SNP\\tpos = %s\\tref = %s\\tcoverage =", "of the deleted bases # and the actual bases that", "when analyzing BWA-MEM's mpileup. # There are pileup bases that", "for the deletions denoted with the '*' sign, which I", "the alt field contains the ref base + the insertion", "None: fp_variant.close() if fp_vcf != None: fp_vcf.close() summary_lines = \"\"", "# Create the BAM index file. command = \"samtools index", "len(lines); bed_pos_end = -1 if bed_position != \"\": bed_split =", "Similar to the deletion marking, but here we actually care", "continue else: # print line_split[0]; # print bed_chromosome; i +=", "= %d, coverage = %.2f\" % ( i, ret_snp_count[0], ret_insertion_count[0],", ".sam or .bam! Input file path: \"%s\".\\n' % alignments_path )", "deletion event. ### VCF output ### alt_base = ( (\"{}\")", "1 while bases[j] in \"0123456789\": j += 1 num_bases =", ") ret_variant_list.append(variant_line) ### VCF output ### qual = 1000 info", "| samtools sort - %s\" % ( alignments_path, os.path.splitext(alignments_path_bam)[0], )", "sorted( list(deletion_event_counts.items()), key=operator.itemgetter(1) ) most_common_deletion_count = sorted_deletion_counts[-1][1] most_common_deletion_length = len(sorted_deletion_counts[-1][0])", "num_bases)].upper() increase_in_dict(deletion_event_counts, deletion) # Skip the length of the numeric", "# line = lines[i]; if num_bases_to_skip > 0: num_bases_to_skip -=", "bases that do not have any actual bases, but only", "% reference_path) fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Raw Depth\">\\n') fp_vcf.write( '##INFO=<ID=TYPE,Number=A,Type=String,Description=\"Type of each allele (snp,", "+= 1 down there. elif base == r\"$\": # This", "consensus bases (or variants). import os import sys import operator", "# In our approach, we ignore this case, because we", "fp_sum = open(summary_file, \"r\") lines = fp_sum.readlines() fp_sum.close() except IOError:", "i < len(original_bases): if original_bases[i] == \".\" or original_bases[i] ==", "= \"\" if len(sys.argv) > 5: bed_position = sys.argv[5] #", "= {} deletion_event_counts = {} end_counts = 0 # print", "% ( position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, (\"{}\") if", "% (line.strip(), str(base_counts))) # EDIT: Previously I compared the total", "insertions = %d, deletions = %d, undercovered = %d, coverage", "( position, ref_name, int(coverage), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), ) ) ret_variant_list.append(variant_line)", "ref-reverse, alt-forward and alt-reverse bases\">\\n' ) fp_vcf.write( '##INFO=<ID=INDEL,Number=0,Type=Flag,Description=\"Indicates that the", "fp: # line = lines[i]; if num_bases_to_skip > 0: num_bases_to_skip", "= \"%s\\t%s\\t.\\t%s\\t%s\\t%d\\tPASS\\t%s\" % ( ref_name, position, ref_field, alt_field, qual, info,", "the SAM file to a sorted BAM file. 
command =", "str(insertion_event_counts), str(deletion_event_counts), ) ) ret_variant_list.append(variant_line) ### VCF output ### qual", "summary_file except IOError: sys.stderr.write( 'ERROR: Could not open file \"%s\"", "actual number of bases # that need to be skipped.", "\".sum\" try: fp_sum = open(summary_file, \"r\") lines = fp_sum.readlines() fp_sum.close()", ": (j + num_bases)].upper() increase_in_dict(deletion_event_counts, deletion) # Skip the length", "# print ''; # sys.stdout.flush(); i = 0 while i", ") ret_vcf_list.append(vcf_line) ################## elif ( most_common_deletion_count > most_common_insertion_count and most_common_deletion_count", "int(bed_pos_split[1]) use_bed = True sys.stderr.write(\"Using location specified through commandline:\\n\") sys.stderr.write('\\tChromosome:", "break; fp.close() sys.stderr.write(\"\\n\") if fp_variant != None: fp_variant.close() if fp_vcf", "use_bed == True: line_split = line.strip().split(\"\\t\") if len(line_split) > 2", "number of bases that are inserted; j = i +", ": j]) skip_bases = (j - i) + num_bases -", "this base, or the non_indel_coverage_current_base? most_common_base_count = 0 ### Handling", "str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip()); # ret_variant_list.append(variant_line); try: temp_sorted_bc = sorted_base_counts[-1][0]", "in the string. j = i + 1 while bases[j]", "= open(summary_file, \"w\") fp_sum.write(summary_lines) fp_sum.close() return summary_file except IOError: sys.stderr.write(", "'.' and ',' signs with the actual reference base. i", "most_common_insertion_count > non_indel_coverage_next_base ): # In this case, insertions are", "output_prefix, 0, bed_position) # if (output_prefix != '-'): # CollectSummaries([sam_file],", "fp_sum.write(summary_lines) fp_sum.close() return summary_file except IOError: sys.stderr.write( 'ERROR: Could not", "current_pos = int(line_split[1]) if current_pos < bed_pos_start or current_pos >=", "bases += original_bases[i] i += 1 base_counts = {} insertion_count", "= False else: bed_chromosome = bed_split[0] bed_pos_split = bed_split[1].split(\"-\") if", "0 insertion_event_counts = {} deletion_event_counts = {} end_counts = 0", "False: sys.stderr.write( 'ERROR: File extension needs to be either .sam", "coverage_threshold) summary_file = process_mpileup( alignments_path, reference_path, (\"%s.mpileup\" % alignments_path_bam), coverage_threshold,", "coverage_threshold: ret_num_undercovered_bases[0] += 1 # ret_coverage_sum[0] += 0; ret_coverage_sum[0] +=", "sys.stderr.write(str(sorted_base_counts) + '\\n'); # sys.stderr.write('base_counts:\\n'); # sys.stderr.write(str(base_counts) + '\\n'); #", "most_common_deletion_count and most_common_insertion_count > non_indel_coverage_next_base ): # In this case,", "sys.argv[1] coverage_threshold = int(sys.argv[2]) output_prefix = sys.argv[3] sam_file = sys.argv[4]", "%s\\tinsertion_counts = %s\\tdeletion_counts = %s\\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count,", "2000000 or int(position) == 3000000 or int(position) == 4000000): #", "False # Sanity check, just to see if there actually", "fp_sum.close() return summary_file except IOError: sys.stderr.write( 'ERROR: Could not open", "deletion_unique == True: # ret_deletion_count[0] += most_common_deletion_length; ret_deletion_count[0] += 1", "# sys.stderr.write(str(sorted_base_counts) + '\\n'); # sys.stderr.write('base_counts:\\n'); # sys.stderr.write(str(base_counts) + '\\n');", "# i = 0; i = 0 if 
[Recovered fragments, second source file] The overlapping n-gram fragments from this point on belong to a Python consensus-calling script whose header reads "Copyright <NAME>, 2015. www.sovic.org" and "Creates a pileup from a given SAM/BAM file, and calls consensus bases (or variants)"; it imports os, sys, operator and subprocess. Its entry point main(alignments_path, reference_path, coverage_threshold, output_prefix, thread_id=0, bed_position="") first checks that the alignment file exists and ends in .sam or .bam. A SAM input is converted to a coordinate-sorted BAM with "samtools view -bS <sam> | samtools sort - <prefix>", the BAM is indexed with "samtools index <bam> <bam>.bai", and the text pileup is regenerated with "samtools mpileup -B -d 1000000 -Q 0 -A -f <reference> <bam> > <bam>.mpileup" whenever the alignments are newer than the cached BAM or .mpileup file. The expected command line is "<script> <reference_file_path> coverage_threshold <collective_output_file> <sam/bam_file_1> [<sam/bam_file_2> ...]"; passing "-" as the collective output file suppresses the collected summary on disk, and an optional position string of the form "chromosome:start-end" restricts processing to one region. A sketch of this preprocessing pipeline follows.
The remaining fragments of the same script cover process_mpileup() and process_mpileup_line(), which read the .mpileup file line by line. Each data line is split into reference name, position, reference base, coverage and the pileup base string; '.' and ',' are counted as matches to the reference base, '^' plus the following mapping-quality character marks a read start, '$' marks a read end, '*' marks a base deleted in that read, and '+N<seq>' / '-N<seq>' record insertion and deletion events whose sequences are tallied in separate dictionaries. Positions whose coverage falls below coverage_threshold are counted as undercovered. Otherwise the most common insertion or deletion event wins when its count is unique and exceeds both the competing indel count and the non-indel coverage of the next base; failing that, the most common base wins, with ties broken in favour of the reference base. Each decision updates the SNP, insertion, deletion, undercovered, called and correct-base counters and the coverage sum, appends a descriptive line to <output_prefix>-cov_<threshold>.variant.csv, and appends a VCFv4.0 record to <output_prefix>-cov_<threshold>.variant.vcf (DP and TYPE in INFO; insertions put the reference base plus the inserted sequence in ALT, deletions put the reference base plus the deleted sequence in REF). process_mpileup() finally writes the totals and the average coverage to <output_prefix>-cov_<threshold>.variant.sum, and CollectSummaries() concatenates the per-input .sum files into the collective output file. A sketch of the per-line base and indel counting follows.
The last group of fragments comes from a different file: an OpenERP wizard module. Its header identifies "OpenERP, Open Source Management Solution, Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>)" and carries the standard GNU Affero General Public License notice (free software, version 3 or any later version, distributed without any warranty of merchantability or fitness for a particular purpose, license text at <http://www.gnu.org/licenses/>). The module imports time, lxml.etree and openerp.osv and defines account_payment_populate_statement, an osv.osv_memory with _name = "account.payment.populate.statement", _description = "Account Payment Populate Statement" and a single many2many column 'lines' pointing to payment.line ("Payment Lines"). fields_view_get() searches payment.line for lines whose move line is unreconciled and valid and which are not yet linked to a bank statement line, extends the result with lines whose payment order has no payment mode, and injects '[("id", "in", <ids>)]' as a domain on the 'lines' field by rewriting the returned view arch with lxml. populate_statement() reads the selected line ids, browses the bank statement in context['active_id'], and for each payment line with a move line converts amount_currency into the statement currency at the line's maturity date, calls account.voucher's onchange_partner_id() to prefill a payment voucher, creates the voucher and the voucher line whose move matches line.move_line_id.move_id, creates an account.bank.statement.line on the statement (name from the payment order reference or '?', the amount negated, ref from line.communication), writes the new statement line id back to the payment line as bank_statement_line_id, and closes the wizard. A sketch of the view-domain injection follows.
# ############################################################################## import time", "that it will be useful, # but WITHOUT ANY WARRANTY;", "import time from lxml import etree from openerp.osv import fields,", "in nodes: node.set('domain', domain) res['arch'] = etree.tostring(doc) return res def", "exists no more now amount = currency_obj.compute(cr, uid, line.currency.id, statement.currency.id,", "line_dict if voucher_line_dict: voucher_line_dict.update({'voucher_id': voucher_id}) voucher_line_obj.create(cr, uid, voucher_line_dict, context=context) st_line_id", "= self.pool.get('account.voucher') voucher_line_obj = self.pool.get('account.voucher.line') move_line_obj = self.pool.get('account.move.line') if context", "('move_line_id.state','=','valid')]) line_ids.extend(line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('order_id.mode', '=', False),", "Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # #", "('move_line_id.state','=','valid')])) domain = '[(\"id\", \"in\", '+ str(line_ids)+')]' doc = etree.XML(res['arch'])", "self.pool.get('account.move.line') if context is None: context = {} data =", "Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program", "more details. # # You should have received a copy", "earlier,but this field exists no more now amount = currency_obj.compute(cr,", "now amount = currency_obj.compute(cr, uid, line.currency.id, statement.currency.id, line.amount_currency, context=ctx) if", "be useful, # but WITHOUT ANY WARRANTY; without even the", "terms of the GNU Affero General Public License as #", "None: context = {} data = self.read(cr, uid, ids, context=context)[0]", "= self.pool.get('account.bank.statement') statement_line_obj = self.pool.get('account.bank.statement.line') currency_obj = self.pool.get('res.currency') voucher_obj =", "'partner_id': line.partner_id.id, 'statement_id': statement.id, 'ref': line.communication, }, context=context) line_obj.write(cr, uid,", "value_date earlier,but this field exists no more now amount =", "line.partner_id.id, 'journal_id': statement.journal_id.id, 'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id), 'company_id': statement.company_id.id, 'currency_id': statement.currency.id,", "= self.pool.get('payment.line') res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context,", "\"in\", '+ str(line_ids)+')]' doc = etree.XML(res['arch']) nodes = doc.xpath(\"//field[@name='lines']\") for", "statement_line_obj.create(cr, uid, { 'name': line.order_id.reference or '?', 'amount': - amount,", "= self.pool.get('account.bank.statement.line') currency_obj = self.pool.get('res.currency') voucher_obj = self.pool.get('account.voucher') voucher_line_obj =", "str(line_ids)+')]' doc = etree.XML(res['arch']) nodes = doc.xpath(\"//field[@name='lines']\") for node in", "account_payment_populate_statement(osv.osv_memory): _name = \"account.payment.populate.statement\" _description = \"Account Payment Populate Statement\"", "# # This program is distributed in the hope that", "'=', False), ('bank_statement_line_id', '=', False), ('move_line_id.state','=','valid')]) line_ids.extend(line_obj.search(cr, uid, [ ('move_line_id.reconcile_id',", "# published by the Free Software Foundation, either version 3", "_description = \"Account Payment Populate Statement\" _columns = { 'lines':", "statement.currency.id, 'date': line.date or time.strftime('%Y-%m-%d'), 'amount': 
abs(amount), 'period_id': statement.period_id.id, }", "of the GNU Affero General Public License # along with", "statement.company_id.id, 'currency_id': statement.currency.id, 'date': line.date or time.strftime('%Y-%m-%d'), 'amount': abs(amount), 'period_id':", "ids, context=context)[0] line_ids = data['lines'] if not line_ids: return {'type':", "{ 'type': 'payment', 'name': line.name, 'partner_id': line.partner_id.id, 'journal_id': statement.journal_id.id, 'account_id':", "Payment Populate Statement\" _columns = { 'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id',", "fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): line_obj =", "self.pool.get('res.currency') voucher_obj = self.pool.get('account.voucher') voucher_line_obj = self.pool.get('account.voucher.line') move_line_obj = self.pool.get('account.move.line')", "or (at your option) any later version. # # This", "= {} for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']: move_line =", "context=None, toolbar=False, submenu=False): line_obj = self.pool.get('payment.line') res = super(account_payment_populate_statement, self).fields_view_get(cr,", "# but WITHOUT ANY WARRANTY; without even the implied warranty", "for line in line_obj.browse(cr, uid, line_ids, context=context): ctx = context.copy()", "Statement\" _columns = { 'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment", "cr, uid, ids, context=None): line_obj = self.pool.get('payment.line') statement_obj = self.pool.get('account.bank.statement')", "This program is distributed in the hope that it will", "and/or modify # it under the terms of the GNU", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "SPRL (<http://tiny.be>). 
# # This program is free software: you", "view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False) line_ids = line_obj.search(cr, uid, [", "move_line_obj = self.pool.get('account.move.line') if context is None: context = {}", "it and/or modify # it under the terms of the", "_columns = { 'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines')", "it will be useful, # but WITHOUT ANY WARRANTY; without", "uid, [ ('move_line_id.reconcile_id', '=', False), ('order_id.mode', '=', False), ('move_line_id.state','=','valid')])) domain", "line.partner_id.id, 'statement_id': statement.id, 'ref': line.communication, }, context=context) line_obj.write(cr, uid, [line.id],", "= \"account.payment.populate.statement\" _description = \"Account Payment Populate Statement\" _columns =", "# # This program is free software: you can redistribute", "result['value'].get('account_id', statement.journal_id.default_credit_account_id.id), 'company_id': statement.company_id.id, 'currency_id': statement.currency.id, 'date': line.date or time.strftime('%Y-%m-%d'),", "OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny", "move_line_obj.browse(cr, uid, line_dict['move_line_id'], context) if line.move_line_id.move_id.id == move_line.move_id.id: voucher_line_dict =", "'payment_id', 'line_id', 'Payment Lines') } def fields_view_get(self, cr, uid, view_id=None,", "hope that it will be useful, # but WITHOUT ANY", "'period_id': statement.period_id.id, } voucher_id = voucher_obj.create(cr, uid, voucher_res, context=context) voucher_line_dict", "return res def populate_statement(self, cr, uid, ids, context=None): line_obj =", "result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id,", "program is distributed in the hope that it will be", "import fields, osv class account_payment_populate_statement(osv.osv_memory): _name = \"account.payment.populate.statement\" _description =", "uid, voucher_res, context=context) voucher_line_dict = {} for line_dict in result['value']['line_cr_ids']", "= currency_obj.compute(cr, uid, line.currency.id, statement.currency.id, line.amount_currency, context=ctx) if not line.move_line_id.id:", "or '?', 'amount': - amount, 'partner_id': line.partner_id.id, 'statement_id': statement.id, 'ref':", "voucher_obj.create(cr, uid, voucher_res, context=context) voucher_line_dict = {} for line_dict in", "of the GNU Affero General Public License as # published", "Public License # along with this program. 
If not, see", "more now amount = currency_obj.compute(cr, uid, line.currency.id, statement.currency.id, line.amount_currency, context=ctx)", "will be useful, # but WITHOUT ANY WARRANTY; without even", "'date': line.date or time.strftime('%Y-%m-%d'), 'amount': abs(amount), 'period_id': statement.period_id.id, } voucher_id", "line_dict['move_line_id'], context) if line.move_line_id.move_id.id == move_line.move_id.id: voucher_line_dict = line_dict if", "= statement_line_obj.create(cr, uid, { 'name': line.order_id.reference or '?', 'amount': -", "context = dict(context, move_line_ids=[line.move_line_id.id]) result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id,", "date=line.ml_maturity_date, context=context) if line.move_line_id: voucher_res = { 'type': 'payment', 'name':", "}, context=context) line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id}) return {'type': 'ir.actions.act_window_close'}", "res['arch'] = etree.tostring(doc) return res def populate_statement(self, cr, uid, ids,", "= etree.XML(res['arch']) nodes = doc.xpath(\"//field[@name='lines']\") for node in nodes: node.set('domain',", "= '[(\"id\", \"in\", '+ str(line_ids)+')]' doc = etree.XML(res['arch']) nodes =", "a copy of the GNU Affero General Public License #", "See the # GNU Affero General Public License for more", "context=context) for line in line_obj.browse(cr, uid, line_ids, context=context): ctx =", "self.pool.get('account.bank.statement.line') currency_obj = self.pool.get('res.currency') voucher_obj = self.pool.get('account.voucher') voucher_line_obj = self.pool.get('account.voucher.line')", "context=context)[0] line_ids = data['lines'] if not line_ids: return {'type': 'ir.actions.act_window_close'}", "dict(context, move_line_ids=[line.move_line_id.id]) result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount),", "'ref': line.communication, }, context=context) line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id}) return", "voucher_id}) voucher_line_obj.create(cr, uid, voucher_line_dict, context=context) st_line_id = statement_line_obj.create(cr, uid, {", "uid, voucher_line_dict, context=context) st_line_id = statement_line_obj.create(cr, uid, { 'name': line.order_id.reference", "for more details. # # You should have received a", "doc = etree.XML(res['arch']) nodes = doc.xpath(\"//field[@name='lines']\") for node in nodes:", "uid, ids, context=None): line_obj = self.pool.get('payment.line') statement_obj = self.pool.get('account.bank.statement') statement_line_obj", "statement.id, 'ref': line.communication, }, context=context) line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id})", "redistribute it and/or modify # it under the terms of", "'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines') } def fields_view_get(self, cr, uid,", "{'type': 'ir.actions.act_window_close'} statement = statement_obj.browse(cr, uid, context['active_id'], context=context) for line", "the terms of the GNU Affero General Public License as", "context=context): ctx = context.copy() ctx['date'] = line.ml_maturity_date # was value_date", "move_line.move_id.id: voucher_line_dict = line_dict if voucher_line_dict: voucher_line_dict.update({'voucher_id': voucher_id}) voucher_line_obj.create(cr, uid,", "ids, context=None): line_obj = self.pool.get('payment.line') statement_obj = self.pool.get('account.bank.statement') statement_line_obj =", "<http://www.gnu.org/licenses/>. 
# ############################################################################## import time from lxml import etree from", "as # published by the Free Software Foundation, either version", "your option) any later version. # # This program is", "from openerp.osv import fields, osv class account_payment_populate_statement(osv.osv_memory): _name = \"account.payment.populate.statement\"", "nodes: node.set('domain', domain) res['arch'] = etree.tostring(doc) return res def populate_statement(self,", "self.pool.get('account.voucher') voucher_line_obj = self.pool.get('account.voucher.line') move_line_obj = self.pool.get('account.move.line') if context is", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "(at your option) any later version. # # This program", "voucher_obj = self.pool.get('account.voucher') voucher_line_obj = self.pool.get('account.voucher.line') move_line_obj = self.pool.get('account.move.line') if", "# along with this program. If not, see <http://www.gnu.org/licenses/>. #", "License, or (at your option) any later version. # #", "the hope that it will be useful, # but WITHOUT", "Affero General Public License for more details. # # You", "False), ('move_line_id.state','=','valid')])) domain = '[(\"id\", \"in\", '+ str(line_ids)+')]' doc =", "this field exists no more now amount = currency_obj.compute(cr, uid,", "etree.XML(res['arch']) nodes = doc.xpath(\"//field[@name='lines']\") for node in nodes: node.set('domain', domain)", "software: you can redistribute it and/or modify # it under", "False), ('move_line_id.state','=','valid')]) line_ids.extend(line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('order_id.mode', '=',", "amount = currency_obj.compute(cr, uid, line.currency.id, statement.currency.id, line.amount_currency, context=ctx) if not", "_name = \"account.payment.populate.statement\" _description = \"Account Payment Populate Statement\" _columns", "for node in nodes: node.set('domain', domain) res['arch'] = etree.tostring(doc) return", "should have received a copy of the GNU Affero General", "FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU Affero", "Free Software Foundation, either version 3 of the # License,", "line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('bank_statement_line_id', '=', False), ('move_line_id.state','=','valid')])", "it under the terms of the GNU Affero General Public", "'[(\"id\", \"in\", '+ str(line_ids)+')]' doc = etree.XML(res['arch']) nodes = doc.xpath(\"//field[@name='lines']\")", "= doc.xpath(\"//field[@name='lines']\") for node in nodes: node.set('domain', domain) res['arch'] =", "voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date,", "view_type='form', context=None, toolbar=False, submenu=False): line_obj = self.pool.get('payment.line') res = super(account_payment_populate_statement,", "'?', 'amount': - amount, 'partner_id': line.partner_id.id, 'statement_id': statement.id, 'ref': line.communication,", "'partner_id': line.partner_id.id, 'journal_id': statement.journal_id.id, 'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id), 'company_id': statement.company_id.id, 'currency_id':", "line_ids.extend(line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('order_id.mode', '=', False), ('move_line_id.state','=','valid')]))", "= self.read(cr, uid, ids, context=context)[0] line_ids = data['lines'] if not", "field exists no more now amount = currency_obj.compute(cr, uid, line.currency.id,", "distributed in the hope that it will be useful, #", "not line.move_line_id.id: continue context = dict(context, move_line_ids=[line.move_line_id.id]) result = voucher_obj.onchange_partner_id(cr,", "uid, { 'name': line.order_id.reference or '?', 'amount': - amount, 'partner_id':", "published by the Free Software Foundation, either version 3 of", "License # along with this program. If not, see <http://www.gnu.org/licenses/>.", "= move_line_obj.browse(cr, uid, line_dict['move_line_id'], context) if line.move_line_id.move_id.id == move_line.move_id.id: voucher_line_dict", "} def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):", "(<http://tiny.be>). # # This program is free software: you can", "the Free Software Foundation, either version 3 of the #", "toolbar=toolbar, submenu=False) line_ids = line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False),", "version 3 of the # License, or (at your option)", "# it under the terms of the GNU Affero General", "'=', False), ('move_line_id.state','=','valid')]) line_ids.extend(line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('order_id.mode',", "= self.pool.get('payment.line') statement_obj = self.pool.get('account.bank.statement') statement_line_obj = self.pool.get('account.bank.statement.line') currency_obj =", "useful, # but WITHOUT ANY WARRANTY; without even the implied", "journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context) if line.move_line_id: voucher_res", "'line_id', 'Payment Lines') } def fields_view_get(self, cr, uid, view_id=None, view_type='form',", "if line.move_line_id: voucher_res = { 'type': 'payment', 'name': line.name, 'partner_id':", "you can redistribute it and/or modify # it under the", "is None: context = {} data = self.read(cr, uid, ids,", "later version. 
# # This program is distributed in the", "{ 'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines') } def", "# You should have received a copy of the GNU", "amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context) if line.move_line_id: voucher_res =", "not line_ids: return {'type': 'ir.actions.act_window_close'} statement = statement_obj.browse(cr, uid, context['active_id'],", "line_obj = self.pool.get('payment.line') statement_obj = self.pool.get('account.bank.statement') statement_line_obj = self.pool.get('account.bank.statement.line') currency_obj", "'=', False), ('order_id.mode', '=', False), ('move_line_id.state','=','valid')])) domain = '[(\"id\", \"in\",", "== move_line.move_id.id: voucher_line_dict = line_dict if voucher_line_dict: voucher_line_dict.update({'voucher_id': voucher_id}) voucher_line_obj.create(cr,", "= { 'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines') }", "line_ids = line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('bank_statement_line_id', '=',", "with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import", "uid, context['active_id'], context=context) for line in line_obj.browse(cr, uid, line_ids, context=context):", "Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). #", "= line_dict if voucher_line_dict: voucher_line_dict.update({'voucher_id': voucher_id}) voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)", "statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context) if line.move_line_id: voucher_res = { 'type':", "Lines') } def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False,", "line.move_line_id.move_id.id == move_line.move_id.id: voucher_line_dict = line_dict if voucher_line_dict: voucher_line_dict.update({'voucher_id': voucher_id})", "if context is None: context = {} data = self.read(cr,", "Affero General Public License # along with this program. 
If", "partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context) if line.move_line_id:", "'currency_id': statement.currency.id, 'date': line.date or time.strftime('%Y-%m-%d'), 'amount': abs(amount), 'period_id': statement.period_id.id,", "context=None): line_obj = self.pool.get('payment.line') statement_obj = self.pool.get('account.bank.statement') statement_line_obj = self.pool.get('account.bank.statement.line')", "('move_line_id.reconcile_id', '=', False), ('bank_statement_line_id', '=', False), ('move_line_id.state','=','valid')]) line_ids.extend(line_obj.search(cr, uid, [", "voucher_line_dict.update({'voucher_id': voucher_id}) voucher_line_obj.create(cr, uid, voucher_line_dict, context=context) st_line_id = statement_line_obj.create(cr, uid,", "statement_obj = self.pool.get('account.bank.statement') statement_line_obj = self.pool.get('account.bank.statement.line') currency_obj = self.pool.get('res.currency') voucher_obj", "'=', False), ('move_line_id.state','=','valid')])) domain = '[(\"id\", \"in\", '+ str(line_ids)+')]' doc", "super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False) line_ids =", "# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open", "2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free", "view_type=view_type, context=context, toolbar=toolbar, submenu=False) line_ids = line_obj.search(cr, uid, [ ('move_line_id.reconcile_id',", "= voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment',", "voucher_line_obj = self.pool.get('account.voucher.line') move_line_obj = self.pool.get('account.move.line') if context is None:", "etree.tostring(doc) return res def populate_statement(self, cr, uid, ids, context=None): line_obj", "Public License as # published by the Free Software Foundation,", "context = {} data = self.read(cr, uid, ids, context=context)[0] line_ids", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "self.pool.get('account.voucher.line') move_line_obj = self.pool.get('account.move.line') if context is None: context =", "cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): line_obj = self.pool.get('payment.line')", "populate_statement(self, cr, uid, ids, context=None): line_obj = self.pool.get('payment.line') statement_obj =", "line.communication, }, context=context) line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id}) return {'type':", "data = self.read(cr, uid, ids, context=context)[0] line_ids = data['lines'] if", "have received a copy of the GNU Affero General Public", "line.date or time.strftime('%Y-%m-%d'), 'amount': abs(amount), 'period_id': statement.period_id.id, } voucher_id =", "but WITHOUT ANY WARRANTY; without even the implied warranty of", "General Public License # along with this program. If not,", "# was value_date earlier,but this field exists no more now", "line.order_id.reference or '?', 'amount': - amount, 'partner_id': line.partner_id.id, 'statement_id': statement.id,", "Public License for more details. 
# # You should have", "submenu=False): line_obj = self.pool.get('payment.line') res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id,", "program is free software: you can redistribute it and/or modify", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #", "time from lxml import etree from openerp.osv import fields, osv", "Affero General Public License as # published by the Free", "statement.journal_id.default_credit_account_id.id), 'company_id': statement.company_id.id, 'currency_id': statement.currency.id, 'date': line.date or time.strftime('%Y-%m-%d'), 'amount':", "res def populate_statement(self, cr, uid, ids, context=None): line_obj = self.pool.get('payment.line')", "'amount': abs(amount), 'period_id': statement.period_id.id, } voucher_id = voucher_obj.create(cr, uid, voucher_res,", "= line_obj.search(cr, uid, [ ('move_line_id.reconcile_id', '=', False), ('bank_statement_line_id', '=', False),", "line_ids: return {'type': 'ir.actions.act_window_close'} statement = statement_obj.browse(cr, uid, context['active_id'], context=context)", "currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context) if line.move_line_id: voucher_res = {", "uid, line.currency.id, statement.currency.id, line.amount_currency, context=ctx) if not line.move_line_id.id: continue context", "context=context) if line.move_line_id: voucher_res = { 'type': 'payment', 'name': line.name,", "3 of the # License, or (at your option) any", "utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution", "############################################################################## # # OpenERP, Open Source Management Solution # Copyright", "= dict(context, move_line_ids=[line.move_line_id.id]) result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id,", "{} data = self.read(cr, uid, ids, context=context)[0] line_ids = data['lines']", "# # OpenERP, Open Source Management Solution # Copyright (C)", "the # GNU Affero General Public License for more details.", "context is None: context = {} data = self.read(cr, uid,", "program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from", "time.strftime('%Y-%m-%d'), 'amount': abs(amount), 'period_id': statement.period_id.id, } voucher_id = voucher_obj.create(cr, uid,", "voucher_line_dict = line_dict if voucher_line_dict: voucher_line_dict.update({'voucher_id': voucher_id}) voucher_line_obj.create(cr, uid, voucher_line_dict,", "# OpenERP, Open Source Management Solution # Copyright (C) 2004-2010", "def populate_statement(self, cr, uid, ids, context=None): line_obj = self.pool.get('payment.line') statement_obj", "{} for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']: move_line = move_line_obj.browse(cr,", "for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']: move_line = move_line_obj.browse(cr, uid,", "was value_date earlier,but this field exists no more now amount", "[ ('move_line_id.reconcile_id', '=', False), ('order_id.mode', '=', False), ('move_line_id.state','=','valid')])) domain =", "by the Free Software Foundation, either version 3 of the", "PURPOSE. 
See the # GNU Affero General Public License for", "domain = '[(\"id\", \"in\", '+ str(line_ids)+')]' doc = etree.XML(res['arch']) nodes", "if voucher_line_dict: voucher_line_dict.update({'voucher_id': voucher_id}) voucher_line_obj.create(cr, uid, voucher_line_dict, context=context) st_line_id =", "modify # it under the terms of the GNU Affero", "= self.pool.get('account.voucher.line') move_line_obj = self.pool.get('account.move.line') if context is None: context", "etree from openerp.osv import fields, osv class account_payment_populate_statement(osv.osv_memory): _name =", "context['active_id'], context=context) for line in line_obj.browse(cr, uid, line_ids, context=context): ctx", "statement.currency.id, line.amount_currency, context=ctx) if not line.move_line_id.id: continue context = dict(context,", "= line.ml_maturity_date # was value_date earlier,but this field exists no", "view_id=None, view_type='form', context=None, toolbar=False, submenu=False): line_obj = self.pool.get('payment.line') res =", "if not line_ids: return {'type': 'ir.actions.act_window_close'} statement = statement_obj.browse(cr, uid,", "no more now amount = currency_obj.compute(cr, uid, line.currency.id, statement.currency.id, line.amount_currency,", "'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines') } def fields_view_get(self,", "= self.pool.get('account.move.line') if context is None: context = {} data", "context=context) voucher_line_dict = {} for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']:", "self.pool.get('payment.line') res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,", "voucher_line_dict, context=context) st_line_id = statement_line_obj.create(cr, uid, { 'name': line.order_id.reference or", "in the hope that it will be useful, # but", "continue context = dict(context, move_line_ids=[line.move_line_id.id]) result = voucher_obj.onchange_partner_id(cr, uid, [],", "{ 'name': line.order_id.reference or '?', 'amount': - amount, 'partner_id': line.partner_id.id,", "line_ids, context=context): ctx = context.copy() ctx['date'] = line.ml_maturity_date # was", "############################################################################## import time from lxml import etree from openerp.osv import", "ctx['date'] = line.ml_maturity_date # was value_date earlier,but this field exists", "GNU Affero General Public License as # published by the", "Software Foundation, either version 3 of the # License, or", "line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id}) return {'type': 'ir.actions.act_window_close'} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:", "currency_obj.compute(cr, uid, line.currency.id, statement.currency.id, line.amount_currency, context=ctx) if not line.move_line_id.id: continue", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or", "A PARTICULAR PURPOSE. 
See the # GNU Affero General Public", "statement.journal_id.id, 'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id), 'company_id': statement.company_id.id, 'currency_id': statement.currency.id, 'date': line.date", "ctx = context.copy() ctx['date'] = line.ml_maturity_date # was value_date earlier,but", "-*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source", "of the # License, or (at your option) any later", "line.amount_currency, context=ctx) if not line.move_line_id.id: continue context = dict(context, move_line_ids=[line.move_line_id.id])", "is distributed in the hope that it will be useful,", "# License, or (at your option) any later version. #", "# # You should have received a copy of the", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "- amount, 'partner_id': line.partner_id.id, 'statement_id': statement.id, 'ref': line.communication, }, context=context)", "voucher_line_obj.create(cr, uid, voucher_line_dict, context=context) st_line_id = statement_line_obj.create(cr, uid, { 'name':", "= {} data = self.read(cr, uid, ids, context=context)[0] line_ids =", "without even the implied warranty of # MERCHANTABILITY or FITNESS" ]
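The fields_view_get override above rewrites the wizard's form view on the fly: it parses the view's arch XML with lxml, locates the 'lines' field node by XPath, and injects a domain attribute so only the still-unreconciled payment lines are selectable. A minimal standalone sketch of that technique follows; the arch string and the ids in it are made up for illustration and are not part of the module above.

# Standalone sketch of the domain-injection technique used in fields_view_get().
# The arch string and line_ids below are hypothetical examples.
from lxml import etree

arch = '<form string="Populate Statement"><field name="lines"/></form>'
line_ids = [7, 8, 42]  # hypothetical payment.line ids returned by the search

doc = etree.XML(arch)
domain = '[("id", "in", ' + str(line_ids) + ')]'
for node in doc.xpath("//field[@name='lines']"):
    # Restrict the many2many widget to the pre-computed candidate lines.
    node.set('domain', domain)

print(etree.tostring(doc).decode())
# -> <form string="Populate Statement"><field name="lines" domain='[("id", "in", [7, 8, 42])]'/></form>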
# -*- coding: utf-8 -*-
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import imaplib
import re

import keyring

from libqtile.log_utils import logger
from libqtile.widget import base


class ImapWidget(base.ThreadPoolText):
    """Email IMAP widget

    This widget will scan one of your imap email boxes and report the number
    of unseen messages present.  I've configured it to only work with imap
    with ssl.  Your password is obtained from the Gnome Keyring.

    Writing your password to the keyring initially is as simple as (changing
    out <userid> and <password> for your userid and password):

    1) create the file ~/.local/share/python_keyring/keyringrc.cfg with the
       following contents::

           [backend]
           default-keyring=keyring.backends.Gnome.Keyring
           keyring-path=/home/<userid>/.local/share/keyring/

    2) Execute the following python shell script once::

           #!/usr/bin/env python3
           import keyring
           user = <userid>
           password = <password>
           keyring.set_password('imapwidget', user, password)

    mbox names must include the path to the mbox (except for the default
    INBOX).  So, for example if your mailroot is ``~/Maildir``, and you want
    to look at the mailbox at HomeMail/fred, the mbox setting would be:
    ``mbox="~/Maildir/HomeMail/fred"``.  Note the nested sets of quotes!
    Labels can be whatever you choose, of course.

    Widget requirements: keyring_.

    .. _keyring: https://pypi.org/project/keyring/
    """

    defaults = [
        ('mbox', '"INBOX"', 'mailbox to fetch'),
        ('label', 'INBOX', 'label for display'),
        ('user', None, 'email username'),
        ('server', None, 'email server name'),
    ]

    def __init__(self, **config):
        base.ThreadPoolText.__init__(self, "", **config)
        self.add_defaults(ImapWidget.defaults)
        password = keyring.get_password('imapwidget', self.user)
        if password is not None:
            self.password = password
        else:
            logger.critical('Gnome Keyring Error')

    def poll(self):
        im = imaplib.IMAP4_SSL(self.server, 993)
        if self.password == '<PASSWORD>':
            self.text = 'Gnome Keyring Error'
        else:
            im.login(self.user, self.password)
            status, response = im.status(self.mbox, '(UNSEEN)')
            self.text = response[0].decode()
            self.text = self.label + ': ' + re.sub(r'\).*$', '', re.sub(r'^.*N\s', '', self.text))
            im.logout()
        return self.text
Note the nested", "for display'), ('user', None, 'email username'), ('server', None, 'email server", "__init__(self, **config): base.ThreadPoolText.__init__(self, \"\", **config) self.add_defaults(ImapWidget.defaults) password = <PASSWORD>_password('imapwidget', self.user)", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "Software, and to permit persons to whom the Software is", "want to look at the mailbox at HomeMail/fred, the mbox", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or", "\"\"\" defaults = [ ('mbox', '\"INBOX\"', 'mailbox to fetch'), ('label',", "``mbox=\"~/Maildir/HomeMail/fred\"``. Note the nested sets of quotes! Labels can be", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "# Copyright (c) 2015 <NAME> # # Permission is hereby", "keyring_. .. _keyring: https://pypi.org/project/keyring/ \"\"\" defaults = [ ('mbox', '\"INBOX\"',", "the following contents:: [backend] default-keyring=keyring.backends.Gnome.Keyring keyring-path=/home/<userid>/.local/share/keyring/ 2) Execute the following", "\"Software\"), to deal # in the Software without restriction, including", "2) Execute the following python shell script once:: #!/usr/bin/env python3", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "with imap with ssl. Your password is obtained from the", "poll(self): im = imaplib.IMAP4_SSL(self.server, 993) if self.password == '<PASSWORD>': self.text", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "of your imap email boxes and report the number of", "password = <password> keyring.set_password('imapwidget', user, password) mbox names must include", "out <userid> and <password> for your userid and password): 1)", "# copies of the Software, and to permit persons to", "granted, free of charge, to any person obtaining a copy", "obtaining a copy # of this software and associated documentation", "the Gnome Keyring. Writing your password to the keyring initially", "imaplib.IMAP4_SSL(self.server, 993) if self.password == '<PASSWORD>': self.text = 'Gnome Keyring", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "<userid> and <password> for your userid and password): 1) create", "is # furnished to do so, subject to the following", "imaplib import re import keyring from libqtile.log_utils import logger from", "to whom the Software is # furnished to do so,", "copy # of this software and associated documentation files (the", "with the following contents:: [backend] default-keyring=keyring.backends.Gnome.Keyring keyring-path=/home/<userid>/.local/share/keyring/ 2) Execute the", "if password is not None: self.password = password else: logger.critical('Gnome", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "Permission is hereby granted, free of charge, to any person", "file ~/.local/share/python_keyring/keyringrc.cfg with the following contents:: [backend] default-keyring=keyring.backends.Gnome.Keyring keyring-path=/home/<userid>/.local/share/keyring/ 2)", "to only work with imap with ssl. Your password is", "one of your imap email boxes and report the number", "as simple as (changing out <userid> and <password> for your", "The above copyright notice and this permission notice shall be", "report the number of unseen messages present. 
I've configured it", "simple as (changing out <userid> and <password> for your userid", "``~/Maildir``, and you want to look at the mailbox at", "response = im.status(self.mbox, '(UNSEEN)') self.text = response[0].decode() self.text = self.label", "from libqtile.widget import base class ImapWidget(base.ThreadPoolText): \"\"\"Email IMAP widget This", "else: im.login(self.user, self.password) status, response = im.status(self.mbox, '(UNSEEN)') self.text =", "is as simple as (changing out <userid> and <password> for", "= 'Gnome Keyring Error' else: im.login(self.user, self.password) status, response =", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "SOFTWARE. import imaplib import re import keyring from libqtile.log_utils import", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "to permit persons to whom the Software is # furnished", "\"\"\"Email IMAP widget This widget will scan one of your", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING" ]
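The ngrams in the row above come from an IMAP mail-checking widget whose docstring explains how to store the account password with the keyring library before the widget reads it back at startup. As a rough sketch of that setup step only (the service name 'imapwidget' is taken from the row; the user id and password values are placeholders, and the widget class itself is not reproduced here):

import keyring
from typing import Optional

def store_imap_password(user: str, password: str) -> None:
    # Write the secret into the default keyring backend (e.g. GNOME Keyring),
    # under the same service name the widget queries.
    keyring.set_password("imapwidget", user, password)

def load_imap_password(user: str) -> Optional[str]:
    # Returns None when no entry exists, the case the widget reports as a keyring error.
    return keyring.get_password("imapwidget", user)

store_imap_password("fred@example.org", "s3cret")      # placeholder values
print(load_imap_password("fred@example.org"))

Keeping the secret in the system keyring means the widget's configuration never has to contain the password itself.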
[ "test_game_def_permission_denied_if_started(api_client): post_game_def_response = create_game_definition(api_client) game_def_id = post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response =", "= create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] int_game_def_id", "GameDefinition, AppUser def create_game_definition(api_client: APIClient) -> Response: return api_client.post(\"/api/game_definition\") def", "get_game_def_response.data == { 'detail': 'К игре уже нельзя подключиться' }", "def create_game_definition(api_client: APIClient) -> Response: return api_client.post(\"/api/game_definition\") def get_game_definition(api_client: APIClient,", "from rest_framework.response import Response from rest_framework.test import APIClient from game.models", "import APIClient from game.models import GameDefinition, AppUser def create_game_definition(api_client: APIClient)", "post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) user2", "test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id =", "str) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 200 assert", "assert isinstance(game_def_id, str) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code ==", "201 game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) get_game_def_response = get_game_definition(api_client,", "def get_game_definition(api_client: APIClient, game_def_id: str) -> Response: return api_client.get(f\"/api/game_definition/{game_def_id}\") def", "def test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id", "404 assert get_game_def_response.data == {\"detail\": \"Страница не найдена.\"} def test_game_def_permission_denied_if_started(api_client):", "test_game_def_not_found_by_int_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id =", "game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) get_game_def_response = get_game_definition(api_client, game_def_id)", "= create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] assert", "assert get_game_def_response.data == post_game_def_response.data def test_game_def_not_found_by_int_id(api_client): post_game_def_response = create_game_definition(api_client) assert", "game_def_id = post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code", "get_game_def_response.data == post_game_def_response.data def test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code", "isinstance(int_game_def_id, int) get_game_def_response = get_game_definition( api_client, str(int_game_def_id) ) assert get_game_def_response.status_code", "403 
assert get_game_def_response.data == { 'detail': 'К игре уже нельзя", "= post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) user2 = AppUser.objects.create(vk_id=2, username=2) api_client.force_authenticate(user2)", "== post_game_def_response.data def test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code ==", "get_game_def_response.status_code == 200 assert get_game_def_response.data == post_game_def_response.data def test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response", "create_game_definition(api_client: APIClient) -> Response: return api_client.post(\"/api/game_definition\") def get_game_definition(api_client: APIClient, game_def_id:", "post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) user2 = AppUser.objects.create(vk_id=2, username=2) api_client.force_authenticate(user2) get_game_def_response", "= get_game_definition( api_client, str(int_game_def_id) ) assert get_game_def_response.status_code == 404 assert", "return api_client.post(\"/api/game_definition\") def get_game_definition(api_client: APIClient, game_def_id: str) -> Response: return", "= post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) get_game_def_response = get_game_definition(api_client, game_def_id) assert", "post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id assert", "get_game_definition( api_client, str(int_game_def_id) ) assert get_game_def_response.status_code == 404 assert get_game_def_response.data", "post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code", "== 403 assert get_game_def_response.data == { 'detail': 'К игре уже", "api_client.get(f\"/api/game_definition/{game_def_id}\") def test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201", "test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id =", "game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) user2 = AppUser.objects.create(vk_id=2, username=2)", "post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) get_game_def_response", "APIClient) -> Response: return api_client.post(\"/api/game_definition\") def get_game_definition(api_client: APIClient, game_def_id: str)", "\"Страница не найдена.\"} def test_game_def_permission_denied_if_started(api_client): post_game_def_response = create_game_definition(api_client) game_def_id =", "create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] int_game_def_id =", "AppUser.objects.create(vk_id=2, username=2) api_client.force_authenticate(user2) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code ==", "GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 403 assert", "return 
api_client.get(f\"/api/game_definition/{game_def_id}\") def test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code ==", "game_def_id: str) -> Response: return api_client.get(f\"/api/game_definition/{game_def_id}\") def test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response =", "-> Response: return api_client.get(f\"/api/game_definition/{game_def_id}\") def test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert", "post_game_def_response.data[\"id\"] int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id assert isinstance(int_game_def_id, int) get_game_def_response = get_game_definition(", "post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"]", "200 assert get_game_def_response.data == post_game_def_response.data def test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client)", "assert get_game_def_response.status_code == 404 assert get_game_def_response.data == {\"detail\": \"Страница не", "rest_framework.response import Response from rest_framework.test import APIClient from game.models import", "user2 = AppUser.objects.create(vk_id=2, username=2) api_client.force_authenticate(user2) get_game_def_response = get_game_definition(api_client, game_def_id) assert", "post_game_def_response.data def test_game_def_not_found_by_int_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201", "assert isinstance(game_def_id, str) user2 = AppUser.objects.create(vk_id=2, username=2) api_client.force_authenticate(user2) get_game_def_response =", "= get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 403 assert get_game_def_response.data ==", "assert isinstance(int_game_def_id, int) get_game_def_response = get_game_definition( api_client, str(int_game_def_id) ) assert", "{\"detail\": \"Страница не найдена.\"} def test_game_def_permission_denied_if_started(api_client): post_game_def_response = create_game_definition(api_client) game_def_id", "create_game_definition(api_client) game_def_id = post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response = get_game_definition(api_client, game_def_id) assert", "assert get_game_def_response.status_code == 403 assert get_game_def_response.data == { 'detail': 'К", "rest_framework.test import APIClient from game.models import GameDefinition, AppUser def create_game_definition(api_client:", "get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 403 assert get_game_def_response.data", "== 200 assert get_game_def_response.data == post_game_def_response.data def test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response =", "create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id,", "= create_game_definition(api_client) game_def_id = post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response = get_game_definition(api_client, game_def_id)", "= 
GameDefinition.objects.get(pk=game_def_id).id.id assert isinstance(int_game_def_id, int) get_game_def_response = get_game_definition( api_client, str(int_game_def_id)", "str) user2 = AppUser.objects.create(vk_id=2, username=2) api_client.force_authenticate(user2) get_game_def_response = get_game_definition(api_client, game_def_id)", "game_def_id) assert get_game_def_response.status_code == 200 assert get_game_def_response.data == post_game_def_response.data def", "Response from rest_framework.test import APIClient from game.models import GameDefinition, AppUser", "assert get_game_def_response.data == post_game_def_response.data def test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert", "== 404 assert get_game_def_response.data == {\"detail\": \"Страница не найдена.\"} def", "== 201 game_def_id = post_game_def_response.data[\"id\"] int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id assert isinstance(int_game_def_id,", "get_game_def_response.status_code == 403 assert get_game_def_response.data == { 'detail': 'К игре", "== post_game_def_response.data def test_game_def_not_found_by_int_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code ==", "api_client.post(\"/api/game_definition\") def get_game_definition(api_client: APIClient, game_def_id: str) -> Response: return api_client.get(f\"/api/game_definition/{game_def_id}\")", "post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 403", "Response: return api_client.get(f\"/api/game_definition/{game_def_id}\") def test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code", "str(int_game_def_id) ) assert get_game_def_response.status_code == 404 assert get_game_def_response.data == {\"detail\":", "APIClient, game_def_id: str) -> Response: return api_client.get(f\"/api/game_definition/{game_def_id}\") def test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response", "int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id assert isinstance(int_game_def_id, int) get_game_def_response = get_game_definition( api_client,", "assert get_game_def_response.data == { 'detail': 'К игре уже нельзя подключиться'", "APIClient from game.models import GameDefinition, AppUser def create_game_definition(api_client: APIClient) ->", "def test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id", "assert get_game_def_response.status_code == 200 assert get_game_def_response.data == post_game_def_response.data def test_game_def_not_found_by_int_id(api_client):", "import Response from rest_framework.test import APIClient from game.models import GameDefinition,", "post_game_def_response.data def test_returns_game_def_to_another_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201", "== 200 assert get_game_def_response.data == post_game_def_response.data def test_game_def_not_found_by_int_id(api_client): post_game_def_response =", "get_game_definition(api_client, game_def_id) assert 
get_game_def_response.status_code == 403 assert get_game_def_response.data == {", "найдена.\"} def test_game_def_permission_denied_if_started(api_client): post_game_def_response = create_game_definition(api_client) game_def_id = post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True)", "get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 200 assert get_game_def_response.data", "= AppUser.objects.create(vk_id=2, username=2) api_client.force_authenticate(user2) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code", "api_client.force_authenticate(user2) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 200 assert", "int) get_game_def_response = get_game_definition( api_client, str(int_game_def_id) ) assert get_game_def_response.status_code ==", "== 201 game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) get_game_def_response =", "post_game_def_response = create_game_definition(api_client) game_def_id = post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response = get_game_definition(api_client,", "isinstance(game_def_id, str) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 200", "def test_game_def_permission_denied_if_started(api_client): post_game_def_response = create_game_definition(api_client) game_def_id = post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response", "get_game_def_response.data == post_game_def_response.data def test_game_def_not_found_by_int_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code", "assert post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id", "Response: return api_client.post(\"/api/game_definition\") def get_game_definition(api_client: APIClient, game_def_id: str) -> Response:", "== 201 game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) user2 =", "не найдена.\"} def test_game_def_permission_denied_if_started(api_client): post_game_def_response = create_game_definition(api_client) game_def_id = post_game_def_response.data[\"id\"]", "api_client, str(int_game_def_id) ) assert get_game_def_response.status_code == 404 assert get_game_def_response.data ==", "assert get_game_def_response.status_code == 200 assert get_game_def_response.data == post_game_def_response.data def test_returns_game_def_to_another_user_by_hash_id(api_client):", ") assert get_game_def_response.status_code == 404 assert get_game_def_response.data == {\"detail\": \"Страница", "201 game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str) user2 = AppUser.objects.create(vk_id=2,", "str) -> Response: return api_client.get(f\"/api/game_definition/{game_def_id}\") def test_returns_game_def_to_the_current_user_by_hash_id(api_client): post_game_def_response = create_game_definition(api_client)", "game_def_id) assert get_game_def_response.status_code == 403 assert get_game_def_response.data == { 'detail':", "<reponame>dimadk24/english-fight-api from rest_framework.response import Response from rest_framework.test 
import APIClient from", "get_game_def_response = get_game_definition( api_client, str(int_game_def_id) ) assert get_game_def_response.status_code == 404", "game.models import GameDefinition, AppUser def create_game_definition(api_client: APIClient) -> Response: return", "= post_game_def_response.data[\"id\"] int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id assert isinstance(int_game_def_id, int) get_game_def_response =", "get_game_definition(api_client: APIClient, game_def_id: str) -> Response: return api_client.get(f\"/api/game_definition/{game_def_id}\") def test_returns_game_def_to_the_current_user_by_hash_id(api_client):", "import GameDefinition, AppUser def create_game_definition(api_client: APIClient) -> Response: return api_client.post(\"/api/game_definition\")", "-> Response: return api_client.post(\"/api/game_definition\") def get_game_definition(api_client: APIClient, game_def_id: str) ->", "from rest_framework.test import APIClient from game.models import GameDefinition, AppUser def", "AppUser def create_game_definition(api_client: APIClient) -> Response: return api_client.post(\"/api/game_definition\") def get_game_definition(api_client:", "assert post_game_def_response.status_code == 201 game_def_id = post_game_def_response.data[\"id\"] assert isinstance(game_def_id, str)", "assert get_game_def_response.data == {\"detail\": \"Страница не найдена.\"} def test_game_def_permission_denied_if_started(api_client): post_game_def_response", "= get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 200 assert get_game_def_response.data ==", "game_def_id = post_game_def_response.data[\"id\"] int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id assert isinstance(int_game_def_id, int) get_game_def_response", "get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 200 assert get_game_def_response.data == post_game_def_response.data", "201 game_def_id = post_game_def_response.data[\"id\"] int_game_def_id = GameDefinition.objects.get(pk=game_def_id).id.id assert isinstance(int_game_def_id, int)", "get_game_def_response.data == {\"detail\": \"Страница не найдена.\"} def test_game_def_permission_denied_if_started(api_client): post_game_def_response =", "def test_game_def_not_found_by_int_id(api_client): post_game_def_response = create_game_definition(api_client) assert post_game_def_response.status_code == 201 game_def_id", "200 assert get_game_def_response.data == post_game_def_response.data def test_game_def_not_found_by_int_id(api_client): post_game_def_response = create_game_definition(api_client)", "GameDefinition.objects.get(pk=game_def_id).id.id assert isinstance(int_game_def_id, int) get_game_def_response = get_game_definition( api_client, str(int_game_def_id) )", "== {\"detail\": \"Страница не найдена.\"} def test_game_def_permission_denied_if_started(api_client): post_game_def_response = create_game_definition(api_client)", "get_game_def_response.status_code == 404 assert get_game_def_response.data == {\"detail\": \"Страница не найдена.\"}", "get_game_def_response.status_code == 200 assert get_game_def_response.data == post_game_def_response.data def test_game_def_not_found_by_int_id(api_client): post_game_def_response", "username=2) api_client.force_authenticate(user2) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code == 200", "from game.models import GameDefinition, AppUser def create_game_definition(api_client: APIClient) 
-> Response:", "isinstance(game_def_id, str) user2 = AppUser.objects.create(vk_id=2, username=2) api_client.force_authenticate(user2) get_game_def_response = get_game_definition(api_client,", "= post_game_def_response.data[\"id\"] GameDefinition.objects.filter(id=game_def_id).update(started=True) get_game_def_response = get_game_definition(api_client, game_def_id) assert get_game_def_response.status_code ==" ]
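Every test function in the row above takes an api_client argument that the ngrams never define; in a pytest + Django REST framework project that argument is normally supplied by a fixture. A hypothetical conftest.py sketch of one way it could be provided (the AppUser fields vk_id and username are taken from the row; the fixture itself is an assumption, not part of the original repository):

import pytest
from rest_framework.test import APIClient

from game.models import AppUser

@pytest.fixture
def api_client(db):
    # Create and authenticate a throwaway user so the /api/game_definition
    # endpoints accept the requests made by the tests.
    user = AppUser.objects.create(vk_id=1, username="1")
    client = APIClient()
    client.force_authenticate(user)
    return client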
[ "for string in strings: N = len(string) output_string += string", "length of the string. Print the concatenated string. strings =", "= input().split() output_string = \"\" for string in strings: N", "# 2. Repeat Strings # Write a Program That Reads", "the string. Print the concatenated string. strings = input().split() output_string", "Each string is repeated N times, where N is the", "N times, where N is the length of the string.", "string. strings = input().split() output_string = \"\" for string in", "N is the length of the string. Print the concatenated", "output_string = \"\" for string in strings: N = len(string)", "a Program That Reads a list of strings. Each string", "# Write a Program That Reads a list of strings.", "is repeated N times, where N is the length of", "strings. Each string is repeated N times, where N is", "strings: N = len(string) output_string += string * N print(output_string)", "is the length of the string. Print the concatenated string.", "Repeat Strings # Write a Program That Reads a list", "of the string. Print the concatenated string. strings = input().split()", "\"\" for string in strings: N = len(string) output_string +=", "times, where N is the length of the string. Print", "That Reads a list of strings. Each string is repeated", "where N is the length of the string. Print the", "of strings. Each string is repeated N times, where N", "the length of the string. Print the concatenated string. strings", "Reads a list of strings. Each string is repeated N", "Program That Reads a list of strings. Each string is", "Print the concatenated string. strings = input().split() output_string = \"\"", "list of strings. Each string is repeated N times, where", "string is repeated N times, where N is the length", "Strings # Write a Program That Reads a list of", "= \"\" for string in strings: N = len(string) output_string", "input().split() output_string = \"\" for string in strings: N =", "the concatenated string. strings = input().split() output_string = \"\" for", "repeated N times, where N is the length of the", "a list of strings. Each string is repeated N times,", "Write a Program That Reads a list of strings. Each", "in strings: N = len(string) output_string += string * N", "string. Print the concatenated string. strings = input().split() output_string =", "concatenated string. strings = input().split() output_string = \"\" for string", "strings = input().split() output_string = \"\" for string in strings:", "2. Repeat Strings # Write a Program That Reads a", "string in strings: N = len(string) output_string += string *" ]
[ "def test_args(): arg_parser = get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args() assert ArgumentParser.args.cleanup_aws_loadbalancers is", "import CleanupAWSLoadbalancersPlugin def test_args(): arg_parser = get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args() assert", "arg_parser.parse_args() assert ArgumentParser.args.cleanup_aws_loadbalancers is False assert ArgumentParser.args.cleanup_aws_loadbalancers_age == \"7 days\"", "cklib.args import get_arg_parser, ArgumentParser from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin def test_args():", "arg_parser = get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args() assert ArgumentParser.args.cleanup_aws_loadbalancers is False assert", "get_arg_parser, ArgumentParser from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin def test_args(): arg_parser =", "get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args() assert ArgumentParser.args.cleanup_aws_loadbalancers is False assert ArgumentParser.args.cleanup_aws_loadbalancers_age ==", "test_args(): arg_parser = get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args() assert ArgumentParser.args.cleanup_aws_loadbalancers is False", "CleanupAWSLoadbalancersPlugin def test_args(): arg_parser = get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args() assert ArgumentParser.args.cleanup_aws_loadbalancers", "from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin def test_args(): arg_parser = get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser)", "CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args() assert ArgumentParser.args.cleanup_aws_loadbalancers is False assert ArgumentParser.args.cleanup_aws_loadbalancers_age == \"7", "cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin def test_args(): arg_parser = get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args()", "from cklib.args import get_arg_parser, ArgumentParser from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin def", "= get_arg_parser() CleanupAWSLoadbalancersPlugin.add_args(arg_parser) arg_parser.parse_args() assert ArgumentParser.args.cleanup_aws_loadbalancers is False assert ArgumentParser.args.cleanup_aws_loadbalancers_age", "import get_arg_parser, ArgumentParser from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin def test_args(): arg_parser", "ArgumentParser from cloudkeeper_plugin_cleanup_aws_loadbalancers import CleanupAWSLoadbalancersPlugin def test_args(): arg_parser = get_arg_parser()" ]
[ "document.', max_length=255, null=True, verbose_name='Value'), ), migrations.AlterField( model_name='metadatatype', name='name', field=models.CharField(help_text='Name used", "-*- coding: utf-8 -*- # Generated by Django 1.11.11 on", "Django 1.11.11 on 2018-09-17 06:45 from __future__ import unicode_literals from", "2018-09-17 06:45 from __future__ import unicode_literals from django.db import migrations,", "value stored in the metadata type field for the document.',", "operations = [ migrations.AlterField( model_name='documentmetadata', name='value', field=models.CharField(blank=True, db_index=True, help_text='The actual", "verbose_name='Value'), ), migrations.AlterField( model_name='metadatatype', name='name', field=models.CharField(help_text='Name used by other apps", "('metadata', '0010_auto_20180823_2353'), ] operations = [ migrations.AlterField( model_name='documentmetadata', name='value', field=models.CharField(blank=True,", "apps to reference this metadata type. Do not use python", "max_length=255, null=True, verbose_name='Value'), ), migrations.AlterField( model_name='metadatatype', name='name', field=models.CharField(help_text='Name used by", "this metadata type. Do not use python reserved words, or", "coding: utf-8 -*- # Generated by Django 1.11.11 on 2018-09-17", "model_name='documentmetadata', name='value', field=models.CharField(blank=True, db_index=True, help_text='The actual value stored in the", "-*- # Generated by Django 1.11.11 on 2018-09-17 06:45 from", "utf-8 -*- # Generated by Django 1.11.11 on 2018-09-17 06:45", "for the document.', max_length=255, null=True, verbose_name='Value'), ), migrations.AlterField( model_name='metadatatype', name='name',", "field=models.CharField(help_text='Name used by other apps to reference this metadata type.", "), migrations.AlterField( model_name='metadatatype', name='name', field=models.CharField(help_text='Name used by other apps to", "[ migrations.AlterField( model_name='documentmetadata', name='value', field=models.CharField(blank=True, db_index=True, help_text='The actual value stored", "name='value', field=models.CharField(blank=True, db_index=True, help_text='The actual value stored in the metadata", "Do not use python reserved words, or spaces.', max_length=48, unique=True,", "models class Migration(migrations.Migration): dependencies = [ ('metadata', '0010_auto_20180823_2353'), ] operations", "metadata type. Do not use python reserved words, or spaces.',", "type field for the document.', max_length=255, null=True, verbose_name='Value'), ), migrations.AlterField(", "1.11.11 on 2018-09-17 06:45 from __future__ import unicode_literals from django.db", "] operations = [ migrations.AlterField( model_name='documentmetadata', name='value', field=models.CharField(blank=True, db_index=True, help_text='The", "migrations.AlterField( model_name='documentmetadata', name='value', field=models.CharField(blank=True, db_index=True, help_text='The actual value stored in", "06:45 from __future__ import unicode_literals from django.db import migrations, models", "unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "type. 
Do not use python reserved words, or spaces.', max_length=48,", "'0010_auto_20180823_2353'), ] operations = [ migrations.AlterField( model_name='documentmetadata', name='value', field=models.CharField(blank=True, db_index=True,", "not use python reserved words, or spaces.', max_length=48, unique=True, verbose_name='Name'),", "metadata type field for the document.', max_length=255, null=True, verbose_name='Value'), ),", "field for the document.', max_length=255, null=True, verbose_name='Value'), ), migrations.AlterField( model_name='metadatatype',", "actual value stored in the metadata type field for the", "in the metadata type field for the document.', max_length=255, null=True,", "used by other apps to reference this metadata type. Do", "migrations.AlterField( model_name='metadatatype', name='name', field=models.CharField(help_text='Name used by other apps to reference", "stored in the metadata type field for the document.', max_length=255,", "name='name', field=models.CharField(help_text='Name used by other apps to reference this metadata", "reference this metadata type. Do not use python reserved words,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "= [ migrations.AlterField( model_name='documentmetadata', name='value', field=models.CharField(blank=True, db_index=True, help_text='The actual value", "Migration(migrations.Migration): dependencies = [ ('metadata', '0010_auto_20180823_2353'), ] operations = [", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('metadata',", "help_text='The actual value stored in the metadata type field for", "use python reserved words, or spaces.', max_length=48, unique=True, verbose_name='Name'), ),", "by Django 1.11.11 on 2018-09-17 06:45 from __future__ import unicode_literals", "[ ('metadata', '0010_auto_20180823_2353'), ] operations = [ migrations.AlterField( model_name='documentmetadata', name='value',", "other apps to reference this metadata type. Do not use", "to reference this metadata type. Do not use python reserved", "class Migration(migrations.Migration): dependencies = [ ('metadata', '0010_auto_20180823_2353'), ] operations =", "on 2018-09-17 06:45 from __future__ import unicode_literals from django.db import", "import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies", "field=models.CharField(blank=True, db_index=True, help_text='The actual value stored in the metadata type", "= [ ('metadata', '0010_auto_20180823_2353'), ] operations = [ migrations.AlterField( model_name='documentmetadata',", "dependencies = [ ('metadata', '0010_auto_20180823_2353'), ] operations = [ migrations.AlterField(", "by other apps to reference this metadata type. 
Do not", "db_index=True, help_text='The actual value stored in the metadata type field", "the document.', max_length=255, null=True, verbose_name='Value'), ), migrations.AlterField( model_name='metadatatype', name='name', field=models.CharField(help_text='Name", "# -*- coding: utf-8 -*- # Generated by Django 1.11.11", "migrations, models class Migration(migrations.Migration): dependencies = [ ('metadata', '0010_auto_20180823_2353'), ]", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('metadata', '0010_auto_20180823_2353'),", "the metadata type field for the document.', max_length=255, null=True, verbose_name='Value'),", "<reponame>prezi/mayan-edms<gh_stars>1-10 # -*- coding: utf-8 -*- # Generated by Django", "Generated by Django 1.11.11 on 2018-09-17 06:45 from __future__ import", "from __future__ import unicode_literals from django.db import migrations, models class", "__future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration):", "null=True, verbose_name='Value'), ), migrations.AlterField( model_name='metadatatype', name='name', field=models.CharField(help_text='Name used by other", "# Generated by Django 1.11.11 on 2018-09-17 06:45 from __future__", "model_name='metadatatype', name='name', field=models.CharField(help_text='Name used by other apps to reference this", "python reserved words, or spaces.', max_length=48, unique=True, verbose_name='Name'), ), ]" ]
[ "+ 1): graph[i].sort() dfs(V) visited = [False] * (N +", "True for n in graph[V]: if not visited[n]: dfs(n) def", "= True def bfs(V): visited[V] = True queue = [V]", "') for n in graph[now]: if not visited[n]: stack.append(n) visited[n]", "now = stack.pop() print(now, end=' ') for n in graph[now]:", "print(now, end=' ') for n in graph[now]: if not visited[n]:", "for n in graph[V]: if not visited[n]: dfs(n) def dfs_s(V):", "for i in range(M): a, b = map(int, input().strip().split()) graph[a].append(b)", "b = map(int, input().strip().split()) graph[a].append(b) graph[b].append(a) for i in range(1,", "map(int, input().strip().split()) visited = [False] * (N + 1) graph", "def dfs(V): print(V, end=' ') visited[V] = True for n", "graph[i].sort() dfs(V) visited = [False] * (N + 1) print()", "= [False] * (N + 1) graph = [[] for", "i in range(1, N + 1): graph[i].sort() dfs(V) visited =", "True def bfs(V): visited[V] = True queue = [V] while", "= map(int, input().strip().split()) visited = [False] * (N + 1)", "= [[] for _ in range(N + 1)] for i", "while stack: now = stack.pop() print(now, end=' ') for n", "visited[n]: stack.append(n) visited[n] = True def bfs(V): visited[V] = True", "_ in range(N + 1)] for i in range(M): a,", "True queue = [V] while queue: now = queue.pop(0) print(now,", "map(int, input().strip().split()) graph[a].append(b) graph[b].append(a) for i in range(1, N +", "visited[V] = True while stack: now = stack.pop() print(now, end='", "if not visited[n]: dfs(n) def dfs_s(V): stack = [V] visited[V]", "for i in range(1, N + 1): graph[i].sort() dfs(V) visited", "i in range(M): a, b = map(int, input().strip().split()) graph[a].append(b) graph[b].append(a)", "= queue.pop(0) print(now, end=' ') for n in graph[now]: if", "def bfs(V): visited[V] = True queue = [V] while queue:", "visited[n] = True def bfs(V): visited[V] = True queue =", "[V] while queue: now = queue.pop(0) print(now, end=' ') for", "stack.pop() print(now, end=' ') for n in graph[now]: if not", "end=' ') for n in graph[now]: if not visited[n]: stack.append(n)", "in range(M): a, b = map(int, input().strip().split()) graph[a].append(b) graph[b].append(a) for", "print(V, end=' ') visited[V] = True for n in graph[V]:", "visited = [False] * (N + 1) graph = [[]", "in graph[now]: if not visited[n]: stack.append(n) visited[n] = True def", "queue.pop(0) print(now, end=' ') for n in graph[now]: if not", "queue = [V] while queue: now = queue.pop(0) print(now, end='", "N, M, V = map(int, input().strip().split()) visited = [False] *", "n in graph[now]: if not visited[n]: queue.append(n) visited[n] = True", "graph[a].append(b) graph[b].append(a) for i in range(1, N + 1): graph[i].sort()", "def dfs_s(V): stack = [V] visited[V] = True while stack:", "dfs_s(V): stack = [V] visited[V] = True while stack: now", "queue: now = queue.pop(0) print(now, end=' ') for n in", "for n in graph[now]: if not visited[n]: stack.append(n) visited[n] =", "n in graph[V]: if not visited[n]: dfs(n) def dfs_s(V): stack", "N + 1): graph[i].sort() dfs(V) visited = [False] * (N", "visited[V] = True queue = [V] while queue: now =", "not visited[n]: stack.append(n) visited[n] = True def bfs(V): visited[V] =", "graph[now]: if not visited[n]: stack.append(n) visited[n] = True def bfs(V):", "+ 1) graph = [[] for _ in range(N +", "now = queue.pop(0) print(now, end=' ') for n in graph[now]:", "True while stack: now = stack.pop() print(now, end=' ') for", "if not visited[n]: queue.append(n) visited[n] = True N, M, V", "graph[now]: if not visited[n]: queue.append(n) 
visited[n] = True N, M,", "input().strip().split()) graph[a].append(b) graph[b].append(a) for i in range(1, N + 1):", "dfs(V): print(V, end=' ') visited[V] = True for n in", "* (N + 1) graph = [[] for _ in", "visited[n]: dfs(n) def dfs_s(V): stack = [V] visited[V] = True", "in range(1, N + 1): graph[i].sort() dfs(V) visited = [False]", "range(1, N + 1): graph[i].sort() dfs(V) visited = [False] *", "= stack.pop() print(now, end=' ') for n in graph[now]: if", "graph = [[] for _ in range(N + 1)] for", "[V] visited[V] = True while stack: now = stack.pop() print(now,", "not visited[n]: dfs(n) def dfs_s(V): stack = [V] visited[V] =", "True N, M, V = map(int, input().strip().split()) visited = [False]", "= map(int, input().strip().split()) graph[a].append(b) graph[b].append(a) for i in range(1, N", "visited[n]: queue.append(n) visited[n] = True N, M, V = map(int,", "') visited[V] = True for n in graph[V]: if not", "(N + 1) graph = [[] for _ in range(N", "in graph[V]: if not visited[n]: dfs(n) def dfs_s(V): stack =", "= [V] visited[V] = True while stack: now = stack.pop()", "range(M): a, b = map(int, input().strip().split()) graph[a].append(b) graph[b].append(a) for i", "M, V = map(int, input().strip().split()) visited = [False] * (N", "for n in graph[now]: if not visited[n]: queue.append(n) visited[n] =", "input().strip().split()) visited = [False] * (N + 1) graph =", "dfs(V) visited = [False] * (N + 1) print() bfs(V)", "graph[b].append(a) for i in range(1, N + 1): graph[i].sort() dfs(V)", "queue.append(n) visited[n] = True N, M, V = map(int, input().strip().split())", "stack = [V] visited[V] = True while stack: now =", "not visited[n]: queue.append(n) visited[n] = True N, M, V =", "n in graph[now]: if not visited[n]: stack.append(n) visited[n] = True", "[[] for _ in range(N + 1)] for i in", "bfs(V): visited[V] = True queue = [V] while queue: now", "= True queue = [V] while queue: now = queue.pop(0)", "while queue: now = queue.pop(0) print(now, end=' ') for n", "= True while stack: now = stack.pop() print(now, end=' ')", "end=' ') for n in graph[now]: if not visited[n]: queue.append(n)", "range(N + 1)] for i in range(M): a, b =", "in graph[now]: if not visited[n]: queue.append(n) visited[n] = True N,", "visited[n] = True N, M, V = map(int, input().strip().split()) visited", "= [V] while queue: now = queue.pop(0) print(now, end=' ')", "[False] * (N + 1) graph = [[] for _", "graph[V]: if not visited[n]: dfs(n) def dfs_s(V): stack = [V]", "in range(N + 1)] for i in range(M): a, b", "= True for n in graph[V]: if not visited[n]: dfs(n)", "if not visited[n]: stack.append(n) visited[n] = True def bfs(V): visited[V]", "visited[V] = True for n in graph[V]: if not visited[n]:", "stack.append(n) visited[n] = True def bfs(V): visited[V] = True queue", "= True N, M, V = map(int, input().strip().split()) visited =", "a, b = map(int, input().strip().split()) graph[a].append(b) graph[b].append(a) for i in", "<filename>algorithm/dfs/boj_1260.py def dfs(V): print(V, end=' ') visited[V] = True for", "end=' ') visited[V] = True for n in graph[V]: if", "V = map(int, input().strip().split()) visited = [False] * (N +", "') for n in graph[now]: if not visited[n]: queue.append(n) visited[n]", "1) graph = [[] for _ in range(N + 1)]", "stack: now = stack.pop() print(now, end=' ') for n in", "dfs(n) def dfs_s(V): stack = [V] visited[V] = True while", "1): graph[i].sort() dfs(V) visited = [False] * (N + 1)", "+ 1)] for i in range(M): a, b = map(int,", "for _ in range(N + 1)] for i in range(M):", "1)] for i in range(M): a, b 
= map(int, input().strip().split())" ]
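The row above carries a standard adjacency-list DFS/BFS solution (Baekjoon 1260 style) split into overlapping ngrams. As a compact reconstruction sketch of the two traversals on a small hard-coded graph instead of stdin (the sample graph and the printed orders are illustrative, not the original boj_1260.py):

from collections import deque

graph = {1: [2, 3], 2: [1, 4], 3: [1, 4], 4: [2, 3]}   # made-up sample graph

def dfs(v, visited=None):
    # Recursive depth-first search, visiting smaller vertex numbers first,
    # matching the sorted adjacency lists in the row above.
    visited = visited if visited is not None else set()
    visited.add(v)
    order = [v]
    for n in sorted(graph[v]):
        if n not in visited:
            order += dfs(n, visited)
    return order

def bfs(start):
    # Queue-based breadth-first search over the same graph.
    visited, order, queue = {start}, [], deque([start])
    while queue:
        v = queue.popleft()
        order.append(v)
        for n in sorted(graph[v]):
            if n not in visited:
                visited.add(n)
                queue.append(n)
    return order

print(dfs(1))   # [1, 2, 4, 3]
print(bfs(1))   # [1, 2, 3, 4]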
[ "__credits__ = [\"<NAME>\", \"<NAME>\", \"<NAME>\"] __license__ = \"ASL\" class Redirector(webapp.RequestHandler):", "google.appengine.ext import webapp from google.appengine.ext.webapp.util import run_wsgi_app __author__ = \"<NAME>,", "webapp.WSGIApplication( [('/.*', Redirector)], debug=True) def main(): run_wsgi_app(application) if __name__ ==", "\"<NAME>, <NAME>, and <NAME>\" __copyright__ = \"Copyright 2013-2015 UKP TU", "= \"ASL\" class Redirector(webapp.RequestHandler): def get(self): self.redirect(\"/argunit/home\") def post(self): self.redirect(\"/argunit/home\")", "\"ASL\" class Redirector(webapp.RequestHandler): def get(self): self.redirect(\"/argunit/home\") def post(self): self.redirect(\"/argunit/home\") application", "from google.appengine.ext.webapp.util import run_wsgi_app __author__ = \"<NAME>, <NAME>, and <NAME>\"", "and <NAME>\" __copyright__ = \"Copyright 2013-2015 UKP TU Darmstadt\" __credits__", "application = webapp.WSGIApplication( [('/.*', Redirector)], debug=True) def main(): run_wsgi_app(application) if", "[\"<NAME>\", \"<NAME>\", \"<NAME>\"] __license__ = \"ASL\" class Redirector(webapp.RequestHandler): def get(self):", "post(self): self.redirect(\"/argunit/home\") application = webapp.WSGIApplication( [('/.*', Redirector)], debug=True) def main():", "__license__ = \"ASL\" class Redirector(webapp.RequestHandler): def get(self): self.redirect(\"/argunit/home\") def post(self):", "class Redirector(webapp.RequestHandler): def get(self): self.redirect(\"/argunit/home\") def post(self): self.redirect(\"/argunit/home\") application =", "= [\"<NAME>\", \"<NAME>\", \"<NAME>\"] __license__ = \"ASL\" class Redirector(webapp.RequestHandler): def", "UKP TU Darmstadt\" __credits__ = [\"<NAME>\", \"<NAME>\", \"<NAME>\"] __license__ =", "self.redirect(\"/argunit/home\") application = webapp.WSGIApplication( [('/.*', Redirector)], debug=True) def main(): run_wsgi_app(application)", "Redirector(webapp.RequestHandler): def get(self): self.redirect(\"/argunit/home\") def post(self): self.redirect(\"/argunit/home\") application = webapp.WSGIApplication(", "2013-2015 UKP TU Darmstadt\" __credits__ = [\"<NAME>\", \"<NAME>\", \"<NAME>\"] __license__", "\"<NAME>\"] __license__ = \"ASL\" class Redirector(webapp.RequestHandler): def get(self): self.redirect(\"/argunit/home\") def", "def get(self): self.redirect(\"/argunit/home\") def post(self): self.redirect(\"/argunit/home\") application = webapp.WSGIApplication( [('/.*',", "Darmstadt\" __credits__ = [\"<NAME>\", \"<NAME>\", \"<NAME>\"] __license__ = \"ASL\" class", "= webapp.WSGIApplication( [('/.*', Redirector)], debug=True) def main(): run_wsgi_app(application) if __name__", "self.redirect(\"/argunit/home\") def post(self): self.redirect(\"/argunit/home\") application = webapp.WSGIApplication( [('/.*', Redirector)], debug=True)", "[('/.*', Redirector)], debug=True) def main(): run_wsgi_app(application) if __name__ == \"__main__\":", "__author__ = \"<NAME>, <NAME>, and <NAME>\" __copyright__ = \"Copyright 2013-2015", "= \"<NAME>, <NAME>, and <NAME>\" __copyright__ = \"Copyright 2013-2015 UKP", "def post(self): self.redirect(\"/argunit/home\") application = webapp.WSGIApplication( [('/.*', Redirector)], debug=True) def", "<NAME>, and <NAME>\" __copyright__ = \"Copyright 2013-2015 UKP TU Darmstadt\"", "import webapp from google.appengine.ext.webapp.util import run_wsgi_app __author__ = \"<NAME>, <NAME>,", "TU Darmstadt\" __credits__ = [\"<NAME>\", \"<NAME>\", \"<NAME>\"] __license__ = \"ASL\"", "\"Copyright 2013-2015 UKP TU 
Darmstadt\" __credits__ = [\"<NAME>\", \"<NAME>\", \"<NAME>\"]", "Redirector)], debug=True) def main(): run_wsgi_app(application) if __name__ == \"__main__\": main()", "= \"Copyright 2013-2015 UKP TU Darmstadt\" __credits__ = [\"<NAME>\", \"<NAME>\",", "<NAME>\" __copyright__ = \"Copyright 2013-2015 UKP TU Darmstadt\" __credits__ =", "__copyright__ = \"Copyright 2013-2015 UKP TU Darmstadt\" __credits__ = [\"<NAME>\",", "from google.appengine.ext import webapp from google.appengine.ext.webapp.util import run_wsgi_app __author__ =", "webapp from google.appengine.ext.webapp.util import run_wsgi_app __author__ = \"<NAME>, <NAME>, and", "\"<NAME>\", \"<NAME>\"] __license__ = \"ASL\" class Redirector(webapp.RequestHandler): def get(self): self.redirect(\"/argunit/home\")", "google.appengine.ext.webapp.util import run_wsgi_app __author__ = \"<NAME>, <NAME>, and <NAME>\" __copyright__", "import run_wsgi_app __author__ = \"<NAME>, <NAME>, and <NAME>\" __copyright__ =", "get(self): self.redirect(\"/argunit/home\") def post(self): self.redirect(\"/argunit/home\") application = webapp.WSGIApplication( [('/.*', Redirector)],", "run_wsgi_app __author__ = \"<NAME>, <NAME>, and <NAME>\" __copyright__ = \"Copyright" ]
[ "directly from this file, as the categorization is not final.", "_augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): nb_images = len(keypoints_on_images) samples =", "1: height = keypoints_on_image.shape[0] for keypoint in keypoints_on_image.keypoints: keypoint.y =", "Do not import directly from this file, as the categorization", "for keypoint in keypoints_on_image.keypoints: keypoint.x = (width - 1) -", "input images. >>> aug = iaa.Flipud(1.0) would vertically flip/mirror all", "imgaug import augmenters as iaa and then e.g. :: seq", "disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input images horizontally. Parameters ----------", "division, absolute_import from .. import parameters as iap import numpy", "= iaa.Fliplr(0.5) would horizontally flip/mirror 50 percent of all input", "aug = iaa.Flipud(1.0) would vertically flip/mirror all input images. \"\"\"", "optional(default=None) See `Augmenter.__init__()` Examples -------- >>> aug = iaa.Fliplr(0.5) would", "- 1) - keypoint.y return keypoints_on_images def get_parameters(self): return [self.p]", "def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state)", "-------- >>> aug = iaa.Fliplr(0.5) would horizontally flip/mirror 50 percent", "self.p = iap.handle_probability_param(p, \"p\") def _augment_images(self, images, random_state, parents, hooks):", "all input images. \"\"\" def __init__(self, p=0, name=None, deterministic=False, random_state=None):", "not final. Use instead :: from imgaug import augmenters as", ">>> aug = iaa.Flipud(0.5) would vertically flip/mirror 50 percent of", "hooks=hooks ) for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 =", "parameters as iap import numpy as np import six.moves as", "parents, hooks): nb_images = len(images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for", "i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1: width =", "_augment_images(self, images, random_state, parents, hooks): nb_images = len(images) samples =", "random_state=random_state) for i in sm.xrange(nb_images): if samples[i] == 1: images[i]", "Augmenters that apply mirroring/flipping operations to images. Do not import", "images horizontally. Parameters ---------- p : number or StochasticParameter, optional(default=0)", "of all input images. >>> aug = iaa.Flipud(1.0) would vertically", ": int or np.random.RandomState or None, optional(default=None) See `Augmenter.__init__()` Examples", "Flip/mirror input images vertically. Parameters ---------- p : number or", "sm.xrange(nb_images): if samples[i] == 1: images[i] = np.flipud(images[i]) return images", "for heatmaps_i in heatmaps], random_state=random_state, parents=parents, hooks=hooks ) for heatmaps_i,", "samples[i] == 1: images[i] = np.fliplr(images[i]) return images def _augment_heatmaps(self,", "- 1) - keypoint.x return keypoints_on_images def get_parameters(self): return [self.p]", "hooks): nb_images = len(images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i", "e.g. 
:: seq = iaa.Sequential([ iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0, 1.0)) ])", "super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, \"p\") def _augment_images(self,", "class Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input", "this file, as the categorization is not final. Use instead", "in sm.xrange(nb_images): if samples[i] == 1: images[i] = np.flipud(images[i]) return", "percent of all input images. >>> aug = iaa.Fliplr(1.0) would", "See `Augmenter.__init__()` random_state : int or np.random.RandomState or None, optional(default=None)", "horizontally flip/mirror 50 percent of all input images. >>> aug", "flip/mirror all input images. \"\"\" def __init__(self, p=0, name=None, deterministic=False,", "== 1: height = keypoints_on_image.shape[0] for keypoint in keypoints_on_image.keypoints: keypoint.y", "input images. \"\"\" def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Flipud,", "`Augmenter.__init__()` deterministic : bool, optional(default=False) See `Augmenter.__init__()` random_state : int", "and then e.g. :: seq = iaa.Sequential([ iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0,", "__init__(self, p=0, name=None, deterministic=False, random_state=None): super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p", "1) - keypoint.x return keypoints_on_images def get_parameters(self): return [self.p] class", "images, random_state, parents, hooks): nb_images = len(images) samples = self.p.draw_samples((nb_images,),", "`Augmenter.__init__()` Examples -------- >>> aug = iaa.Flipud(0.5) would vertically flip/mirror", "images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): arrs_flipped = self._augment_images(", "= len(images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i in sm.xrange(nb_images):", "if samples[i] == 1: images[i] = np.flipud(images[i]) return images def", "parents, hooks): nb_images = len(keypoints_on_images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for", "`Augmenter.__init__()` Examples -------- >>> aug = iaa.Fliplr(0.5) would horizontally flip/mirror", "== 1: images[i] = np.fliplr(images[i]) return images def _augment_heatmaps(self, heatmaps,", "__future__ import print_function, division, absolute_import from .. import parameters as", "line-too-long \"\"\" Flip/mirror input images vertically. Parameters ---------- p :", "of all input images. >>> aug = iaa.Fliplr(1.0) would horizontally", "parents, hooks): arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps],", "then e.g. :: seq = iaa.Sequential([ iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0, 1.0))", "import Augmenter class Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\"", "images. 
>>> aug = iaa.Flipud(1.0) would vertically flip/mirror all input", "random_state=None): super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, \"p\") def", "arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped return heatmaps def _augment_keypoints(self, keypoints_on_images, random_state,", "keypoints_on_image.keypoints: keypoint.x = (width - 1) - keypoint.x return keypoints_on_images", "None, optional(default=None) See `Augmenter.__init__()` Examples -------- >>> aug = iaa.Flipud(0.5)", ": number or StochasticParameter, optional(default=0) Probability of each image to", "vertically. Parameters ---------- p : number or StochasticParameter, optional(default=0) Probability", "iaa.Flipud((0.0, 1.0)) ]) List of augmenters: * Fliplr * Flipud", "= keypoints_on_image.shape[1] for keypoint in keypoints_on_image.keypoints: keypoint.x = (width -", "final. Use instead :: from imgaug import augmenters as iaa", "input images. \"\"\" def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Fliplr,", "keypoints_on_image.shape[1] for keypoint in keypoints_on_image.keypoints: keypoint.x = (width - 1)", "Flip/mirror input images horizontally. Parameters ---------- p : number or", "flipped. name : string, optional(default=None) See `Augmenter.__init__()` deterministic : bool,", "deterministic : bool, optional(default=False) See `Augmenter.__init__()` random_state : int or", "from .. import parameters as iap import numpy as np", "\"\"\" Augmenters that apply mirroring/flipping operations to images. Do not", "1.0)), iaa.Flipud((0.0, 1.0)) ]) List of augmenters: * Fliplr *", "return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): arrs_flipped =", "enumerate(keypoints_on_images): if samples[i] == 1: width = keypoints_on_image.shape[1] for keypoint", "np.fliplr(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): arrs_flipped", "= iaa.Flipud(0.5) would vertically flip/mirror 50 percent of all input", "heatmaps_i in heatmaps], random_state=random_state, parents=parents, hooks=hooks ) for heatmaps_i, arr_flipped", "i in sm.xrange(nb_images): if samples[i] == 1: images[i] = np.fliplr(images[i])", "number or StochasticParameter, optional(default=0) Probability of each image to get", "unused-variable, line-too-long \"\"\" Flip/mirror input images horizontally. Parameters ---------- p", "= np.flipud(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):", "if samples[i] == 1: height = keypoints_on_image.shape[0] for keypoint in", "hooks): nb_images = len(keypoints_on_images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i,", "heatmaps, random_state, parents, hooks): arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i", "name=None, deterministic=False, random_state=None): super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p,", "return heatmaps def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): nb_images =", "iap import numpy as np import six.moves as sm from", "import print_function, division, absolute_import from .. 
import parameters as iap", "len(keypoints_on_images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i, keypoints_on_image in enumerate(keypoints_on_images):", "nb_images = len(images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i in", "as the categorization is not final. Use instead :: from", "= self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps], random_state=random_state, parents=parents, hooks=hooks", "optional(default=0) Probability of each image to get flipped. name :", "Use instead :: from imgaug import augmenters as iaa and", "return keypoints_on_images def get_parameters(self): return [self.p] class Flipud(Augmenter): # pylint:", "= (height - 1) - keypoint.y return keypoints_on_images def get_parameters(self):", "---------- p : number or StochasticParameter, optional(default=0) Probability of each", "enumerate(keypoints_on_images): if samples[i] == 1: height = keypoints_on_image.shape[0] for keypoint", "for i in sm.xrange(nb_images): if samples[i] == 1: images[i] =", "import six.moves as sm from .meta import Augmenter class Fliplr(Augmenter):", "keypoint.x return keypoints_on_images def get_parameters(self): return [self.p] class Flipud(Augmenter): #", "Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input images", "np.flipud(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): arrs_flipped", "from this file, as the categorization is not final. Use", "from imgaug import augmenters as iaa and then e.g. ::", "aug = iaa.Flipud(0.5) would vertically flip/mirror 50 percent of all", "Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input images", "\"\"\" Flip/mirror input images vertically. Parameters ---------- p : number", "np import six.moves as sm from .meta import Augmenter class", "not import directly from this file, as the categorization is", "keypoints_on_image.keypoints: keypoint.y = (height - 1) - keypoint.y return keypoints_on_images", "heatmaps_i.arr_0to1 = arr_flipped return heatmaps def _augment_keypoints(self, keypoints_on_images, random_state, parents,", "input images. >>> aug = iaa.Fliplr(1.0) would horizontally flip/mirror all", "Examples -------- >>> aug = iaa.Flipud(0.5) would vertically flip/mirror 50", "would horizontally flip/mirror all input images. \"\"\" def __init__(self, p=0,", "= iaa.Flipud(1.0) would vertically flip/mirror all input images. 
\"\"\" def", "= (width - 1) - keypoint.x return keypoints_on_images def get_parameters(self):", "in sm.xrange(nb_images): if samples[i] == 1: images[i] = np.fliplr(images[i]) return", "arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps], random_state=random_state, parents=parents,", "= np.fliplr(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):", "[self.p] class Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror", "= self.p.draw_samples((nb_images,), random_state=random_state) for i in sm.xrange(nb_images): if samples[i] ==", "string, optional(default=None) See `Augmenter.__init__()` deterministic : bool, optional(default=False) See `Augmenter.__init__()`", "from .meta import Augmenter class Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable,", "random_state=random_state, parents=parents, hooks=hooks ) for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped):", "in heatmaps], random_state=random_state, parents=parents, hooks=hooks ) for heatmaps_i, arr_flipped in", "-------- >>> aug = iaa.Flipud(0.5) would vertically flip/mirror 50 percent", "vertically flip/mirror 50 percent of all input images. >>> aug", "if samples[i] == 1: width = keypoints_on_image.shape[1] for keypoint in", "np.random.RandomState or None, optional(default=None) See `Augmenter.__init__()` Examples -------- >>> aug", "iaa.Flipud(1.0) would vertically flip/mirror all input images. \"\"\" def __init__(self,", "import parameters as iap import numpy as np import six.moves", "import directly from this file, as the categorization is not", "in keypoints_on_image.keypoints: keypoint.x = (width - 1) - keypoint.x return", "== 1: width = keypoints_on_image.shape[1] for keypoint in keypoints_on_image.keypoints: keypoint.x", "keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1: width = keypoints_on_image.shape[1]", "zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped return heatmaps def _augment_keypoints(self, keypoints_on_images,", "pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input images horizontally. Parameters", ".. import parameters as iap import numpy as np import", "get flipped. name : string, optional(default=None) See `Augmenter.__init__()` deterministic :", "apply mirroring/flipping operations to images. Do not import directly from", "horizontally flip/mirror all input images. \"\"\" def __init__(self, p=0, name=None,", "Augmenter class Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror", "file, as the categorization is not final. Use instead ::", "Fliplr * Flipud \"\"\" from __future__ import print_function, division, absolute_import", "get_parameters(self): return [self.p] class Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long", "pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input images vertically. Parameters", "to get flipped. 
name : string, optional(default=None) See `Augmenter.__init__()` deterministic", "See `Augmenter.__init__()` Examples -------- >>> aug = iaa.Fliplr(0.5) would horizontally", "\"\"\" def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Flipud, self).__init__(name=name, deterministic=deterministic,", "images[i] = np.fliplr(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents,", "aug = iaa.Fliplr(1.0) would horizontally flip/mirror all input images. \"\"\"", "heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped return heatmaps", "samples[i] == 1: height = keypoints_on_image.shape[0] for keypoint in keypoints_on_image.keypoints:", "vertically flip/mirror all input images. \"\"\" def __init__(self, p=0, name=None,", "import numpy as np import six.moves as sm from .meta", "__init__(self, p=0, name=None, deterministic=False, random_state=None): super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p", "== 1: images[i] = np.flipud(images[i]) return images def _augment_heatmaps(self, heatmaps,", "samples = self.p.draw_samples((nb_images,), random_state=random_state) for i, keypoints_on_image in enumerate(keypoints_on_images): if", "def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state)", "from __future__ import print_function, division, absolute_import from .. import parameters", "sm.xrange(nb_images): if samples[i] == 1: images[i] = np.fliplr(images[i]) return images", "= iaa.Fliplr(1.0) would horizontally flip/mirror all input images. \"\"\" def", "]) List of augmenters: * Fliplr * Flipud \"\"\" from", "samples = self.p.draw_samples((nb_images,), random_state=random_state) for i in sm.xrange(nb_images): if samples[i]", "random_state, parents, hooks): nb_images = len(images) samples = self.p.draw_samples((nb_images,), random_state=random_state)", "= len(keypoints_on_images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i, keypoints_on_image in", "as np import six.moves as sm from .meta import Augmenter", "self.p.draw_samples((nb_images,), random_state=random_state) for i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i] ==", "height = keypoints_on_image.shape[0] for keypoint in keypoints_on_image.keypoints: keypoint.y = (height", "StochasticParameter, optional(default=0) Probability of each image to get flipped. name", "random_state, parents, hooks): arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i in", "augmenters as iaa and then e.g. 
:: seq = iaa.Sequential([", "1.0)) ]) List of augmenters: * Fliplr * Flipud \"\"\"", "= keypoints_on_image.shape[0] for keypoint in keypoints_on_image.keypoints: keypoint.y = (height -", "keypoint.x = (width - 1) - keypoint.x return keypoints_on_images def", "keypoints_on_image.shape[0] for keypoint in keypoints_on_image.keypoints: keypoint.y = (height - 1)", "(height - 1) - keypoint.y return keypoints_on_images def get_parameters(self): return", "1: images[i] = np.flipud(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state,", "name=None, deterministic=False, random_state=None): super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p,", "six.moves as sm from .meta import Augmenter class Fliplr(Augmenter): #", ") for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped", "def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): nb_images = len(keypoints_on_images) samples", "is not final. Use instead :: from imgaug import augmenters", "as iap import numpy as np import six.moves as sm", "keypoints_on_images, random_state, parents, hooks): nb_images = len(keypoints_on_images) samples = self.p.draw_samples((nb_images,),", "deterministic=False, random_state=None): super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, \"p\")", "p : number or StochasticParameter, optional(default=0) Probability of each image", "bool, optional(default=False) See `Augmenter.__init__()` random_state : int or np.random.RandomState or", "def _augment_images(self, images, random_state, parents, hooks): nb_images = len(images) samples", "keypoint in keypoints_on_image.keypoints: keypoint.y = (height - 1) - keypoint.y", "def get_parameters(self): return [self.p] class Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable,", "if samples[i] == 1: images[i] = np.fliplr(images[i]) return images def", "= arr_flipped return heatmaps def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):", "would vertically flip/mirror 50 percent of all input images. >>>", "self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, \"p\") def _augment_images(self, images,", "\"\"\" from __future__ import print_function, division, absolute_import from .. import", "i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1: height =", "class Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input", "= iaa.Sequential([ iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0, 1.0)) ]) List of augmenters:", "mirroring/flipping operations to images. Do not import directly from this", "disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input images vertically. Parameters ----------", "`Augmenter.__init__()` random_state : int or np.random.RandomState or None, optional(default=None) See", "heatmaps def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): nb_images = len(keypoints_on_images)", "(width - 1) - keypoint.x return keypoints_on_images def get_parameters(self): return", ">>> aug = iaa.Fliplr(0.5) would horizontally flip/mirror 50 percent of", ": bool, optional(default=False) See `Augmenter.__init__()` random_state : int or np.random.RandomState", "horizontally. 
Parameters ---------- p : number or StochasticParameter, optional(default=0) Probability", "Flipud \"\"\" from __future__ import print_function, division, absolute_import from ..", "unused-variable, line-too-long \"\"\" Flip/mirror input images vertically. Parameters ---------- p", "Probability of each image to get flipped. name : string,", "image to get flipped. name : string, optional(default=None) See `Augmenter.__init__()`", "random_state=random_state) self.p = iap.handle_probability_param(p, \"p\") def _augment_images(self, images, random_state, parents,", "input images vertically. Parameters ---------- p : number or StochasticParameter,", "for i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1: height", "iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0, 1.0)) ]) List of augmenters: * Fliplr", "= self.p.draw_samples((nb_images,), random_state=random_state) for i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i]", "int or np.random.RandomState or None, optional(default=None) See `Augmenter.__init__()` Examples --------", "as iaa and then e.g. :: seq = iaa.Sequential([ iaa.Fliplr((0.0,", "as sm from .meta import Augmenter class Fliplr(Augmenter): # pylint:", "_augment_heatmaps(self, heatmaps, random_state, parents, hooks): arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1 for", "seq = iaa.Sequential([ iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0, 1.0)) ]) List of", ":: seq = iaa.Sequential([ iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0, 1.0)) ]) List", "arr_flipped return heatmaps def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): nb_images", "categorization is not final. Use instead :: from imgaug import", "augmenters: * Fliplr * Flipud \"\"\" from __future__ import print_function,", "width = keypoints_on_image.shape[1] for keypoint in keypoints_on_image.keypoints: keypoint.x = (width", "p=0, name=None, deterministic=False, random_state=None): super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p =", "would vertically flip/mirror all input images. \"\"\" def __init__(self, p=0,", "instead :: from imgaug import augmenters as iaa and then", "each image to get flipped. name : string, optional(default=None) See", "of each image to get flipped. name : string, optional(default=None)", "that apply mirroring/flipping operations to images. Do not import directly", "optional(default=False) See `Augmenter.__init__()` random_state : int or np.random.RandomState or None,", "name : string, optional(default=None) See `Augmenter.__init__()` deterministic : bool, optional(default=False)", "images[i] = np.flipud(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents,", "images. >>> aug = iaa.Fliplr(1.0) would horizontally flip/mirror all input", "1: images[i] = np.fliplr(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state,", "return [self.p] class Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\"", "in enumerate(keypoints_on_images): if samples[i] == 1: width = keypoints_on_image.shape[1] for", "def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1", "line-too-long \"\"\" Flip/mirror input images horizontally. 
Parameters ---------- p :", "keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1: height = keypoints_on_image.shape[0]", "- keypoint.x return keypoints_on_images def get_parameters(self): return [self.p] class Flipud(Augmenter):", "List of augmenters: * Fliplr * Flipud \"\"\" from __future__", "samples[i] == 1: images[i] = np.flipud(images[i]) return images def _augment_heatmaps(self,", "random_state=None): super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, \"p\") def", "input images horizontally. Parameters ---------- p : number or StochasticParameter,", "keypoints_on_images def get_parameters(self): return [self.p] class Flipud(Augmenter): # pylint: disable=locally-disabled,", "to images. Do not import directly from this file, as", "\"p\") def _augment_images(self, images, random_state, parents, hooks): nb_images = len(images)", "operations to images. Do not import directly from this file,", "p=0, name=None, deterministic=False, random_state=None): super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p =", "for keypoint in keypoints_on_image.keypoints: keypoint.y = (height - 1) -", "# pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input images vertically.", "optional(default=None) See `Augmenter.__init__()` deterministic : bool, optional(default=False) See `Augmenter.__init__()` random_state", "in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped return heatmaps def _augment_keypoints(self,", "Examples -------- >>> aug = iaa.Fliplr(0.5) would horizontally flip/mirror 50", "iaa.Fliplr(0.5) would horizontally flip/mirror 50 percent of all input images.", "<reponame>pAoenix/image-Augmented \"\"\" Augmenters that apply mirroring/flipping operations to images. Do", "iap.handle_probability_param(p, \"p\") def _augment_images(self, images, random_state, parents, hooks): nb_images =", ":: from imgaug import augmenters as iaa and then e.g.", "deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, \"p\") def _augment_images(self, images, random_state,", "deterministic=False, random_state=None): super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, \"p\")", "See `Augmenter.__init__()` Examples -------- >>> aug = iaa.Flipud(0.5) would vertically", "hooks): arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps], random_state=random_state,", ".meta import Augmenter class Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long", "for i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1: width", "50 percent of all input images. >>> aug = iaa.Fliplr(1.0)", "all input images. >>> aug = iaa.Fliplr(1.0) would horizontally flip/mirror", "random_state : int or np.random.RandomState or None, optional(default=None) See `Augmenter.__init__()`", "samples[i] == 1: width = keypoints_on_image.shape[1] for keypoint in keypoints_on_image.keypoints:", "images. \"\"\" def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Flipud, self).__init__(name=name,", "absolute_import from .. import parameters as iap import numpy as", "all input images. 
>>> aug = iaa.Flipud(1.0) would vertically flip/mirror", "or np.random.RandomState or None, optional(default=None) See `Augmenter.__init__()` Examples -------- >>>", "[heatmaps_i.arr_0to1 for heatmaps_i in heatmaps], random_state=random_state, parents=parents, hooks=hooks ) for", "aug = iaa.Fliplr(0.5) would horizontally flip/mirror 50 percent of all", "sm from .meta import Augmenter class Fliplr(Augmenter): # pylint: disable=locally-disabled,", "Parameters ---------- p : number or StochasticParameter, optional(default=0) Probability of", "the categorization is not final. Use instead :: from imgaug", "iaa.Sequential([ iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0, 1.0)) ]) List of augmenters: *", "or StochasticParameter, optional(default=0) Probability of each image to get flipped.", "keypoint in keypoints_on_image.keypoints: keypoint.x = (width - 1) - keypoint.x", "\"\"\" def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Fliplr, self).__init__(name=name, deterministic=deterministic,", "in keypoints_on_image.keypoints: keypoint.y = (height - 1) - keypoint.y return", "\"\"\" Flip/mirror input images horizontally. Parameters ---------- p : number", "flip/mirror 50 percent of all input images. >>> aug =", "print_function, division, absolute_import from .. import parameters as iap import", "1: width = keypoints_on_image.shape[1] for keypoint in keypoints_on_image.keypoints: keypoint.x =", "import augmenters as iaa and then e.g. :: seq =", "* Fliplr * Flipud \"\"\" from __future__ import print_function, division,", "optional(default=None) See `Augmenter.__init__()` Examples -------- >>> aug = iaa.Flipud(0.5) would", "images. \"\"\" def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Fliplr, self).__init__(name=name,", "random_state=random_state) for i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1:", "percent of all input images. >>> aug = iaa.Flipud(1.0) would", "50 percent of all input images. >>> aug = iaa.Flipud(1.0)", "iaa.Fliplr(1.0) would horizontally flip/mirror all input images. \"\"\" def __init__(self,", "self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps], random_state=random_state, parents=parents, hooks=hooks )", "nb_images = len(keypoints_on_images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i, keypoints_on_image", "See `Augmenter.__init__()` deterministic : bool, optional(default=False) See `Augmenter.__init__()` random_state :", "for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped return", "images vertically. Parameters ---------- p : number or StochasticParameter, optional(default=0)", "heatmaps], random_state=random_state, parents=parents, hooks=hooks ) for heatmaps_i, arr_flipped in zip(heatmaps,", "random_state, parents, hooks): nb_images = len(keypoints_on_images) samples = self.p.draw_samples((nb_images,), random_state=random_state)", "len(images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i in sm.xrange(nb_images): if", ">>> aug = iaa.Fliplr(1.0) would horizontally flip/mirror all input images.", "arr_flipped in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped return heatmaps def", "would horizontally flip/mirror 50 percent of all input images. 
>>>", "* Flipud \"\"\" from __future__ import print_function, division, absolute_import from", "parents=parents, hooks=hooks ) for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1", "i in sm.xrange(nb_images): if samples[i] == 1: images[i] = np.flipud(images[i])", "numpy as np import six.moves as sm from .meta import", ": string, optional(default=None) See `Augmenter.__init__()` deterministic : bool, optional(default=False) See", "of augmenters: * Fliplr * Flipud \"\"\" from __future__ import", "iaa and then e.g. :: seq = iaa.Sequential([ iaa.Fliplr((0.0, 1.0)),", "None, optional(default=None) See `Augmenter.__init__()` Examples -------- >>> aug = iaa.Fliplr(0.5)", "iaa.Flipud(0.5) would vertically flip/mirror 50 percent of all input images.", ">>> aug = iaa.Flipud(1.0) would vertically flip/mirror all input images.", "self.p.draw_samples((nb_images,), random_state=random_state) for i in sm.xrange(nb_images): if samples[i] == 1:", "images. Do not import directly from this file, as the", "# pylint: disable=locally-disabled, unused-variable, line-too-long \"\"\" Flip/mirror input images horizontally.", "or None, optional(default=None) See `Augmenter.__init__()` Examples -------- >>> aug =", "keypoint.y = (height - 1) - keypoint.y return keypoints_on_images def", "super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, \"p\") def _augment_images(self,", "in enumerate(keypoints_on_images): if samples[i] == 1: height = keypoints_on_image.shape[0] for", "= iap.handle_probability_param(p, \"p\") def _augment_images(self, images, random_state, parents, hooks): nb_images" ]
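The keypoint handling above mirrors coordinates rather than pixels: a point at column x moves to (width - 1) - x under a horizontal flip, and a point at row y moves to (height - 1) - y under a vertical flip. The numpy-only sketch below is not part of the original module; it is added here only to check that arithmetic against np.fliplr/np.flipud on a toy array.

# Standalone illustration (numpy only), not part of the flip augmenters above.
import numpy as np

image = np.arange(12).reshape(3, 4)   # toy 3x4 "image"
height, width = image.shape

flipped_lr = np.fliplr(image)         # what Fliplr does to the pixel grid
flipped_ud = np.flipud(image)         # what Flipud does to the pixel grid

# The value that sat at (y, x) is found at the mirrored coordinate afterwards.
x, y = 3, 0
assert image[y, x] == flipped_lr[y, (width - 1) - x]
assert image[y, x] == flipped_ud[(height - 1) - y, x]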
[ "Complexity - O(n) class Solution: def addTwoNumbers(self, l1: ListNode, l2:", "tempsum//10 tempsum %= 10 else: carry = 0 temp.next =", "= ListNode(tempsum) temp = temp.next l1 = l1.next elif l2:", "tempsum = l1.val + carry if tempsum > 9: carry", "tempsum = l1.val + l2.val tempsum += carry if tempsum", "= l2.next if l1: while l1: tempsum = l1.val +", "O(n) ; Space Complexity - O(n) class Solution: def addTwoNumbers(self,", "Space Complexity - O(n) class Solution: def addTwoNumbers(self, l1: ListNode,", "l1.val + l2.val tempsum += carry if tempsum > 9:", "temp.next l1 = l1.next elif l2: while l2: tempsum =", "= l2.val + carry if tempsum > 9: carry =", "ListNode() while l1 is not None and l2 is not", "l1.next elif l2: while l2: tempsum = l2.val + carry", "if tempsum > 9: carry = tempsum//10 tempsum %= 10", "carry if tempsum > 9: carry = tempsum//10 tempsum %=", "not None: tempsum = l1.val + l2.val tempsum += carry", "while l1: tempsum = l1.val + carry if tempsum >", "l1: ListNode, l2: ListNode) -> ListNode: carry = 0 out", "= temp = ListNode() while l1 is not None and", "= 0 temp.next = ListNode(tempsum) temp = temp.next l1 =", "l2.val + carry if tempsum > 9: carry = tempsum//10", "l1 = l1.next l2 = l2.next if l1: while l1:", "None: tempsum = l1.val + l2.val tempsum += carry if", "and l2 is not None: tempsum = l1.val + l2.val", "temp = ListNode() while l1 is not None and l2", "= ListNode(tempsum) temp = temp.next l2 = l2.next if carry:", "Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: carry", "; Space Complexity - O(n) class Solution: def addTwoNumbers(self, l1:", "- O(n) class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode)", "l1 = l1.next elif l2: while l2: tempsum = l2.val", "temp.next l2 = l2.next if carry: temp.next = ListNode(carry) return", "l2: ListNode) -> ListNode: carry = 0 out = temp", "l1.val + carry if tempsum > 9: carry = tempsum//10", "l2.next if l1: while l1: tempsum = l1.val + carry", "= 0 out = temp = ListNode() while l1 is", "temp = temp.next l2 = l2.next if carry: temp.next =", "ListNode(tempsum) temp = temp.next l1 = l1.next l2 = l2.next", "9: carry = tempsum//10 tempsum %= 10 else: carry =", "= temp.next l1 = l1.next l2 = l2.next if l1:", "out = temp = ListNode() while l1 is not None", "0 temp.next = ListNode(tempsum) temp = temp.next l2 = l2.next", "temp = temp.next l1 = l1.next l2 = l2.next if", "temp.next = ListNode(tempsum) temp = temp.next l1 = l1.next l2", "None and l2 is not None: tempsum = l1.val +", "ListNode(tempsum) temp = temp.next l1 = l1.next elif l2: while", "Time Complexity - O(n) ; Space Complexity - O(n) class", "temp = temp.next l1 = l1.next elif l2: while l2:", "10 else: carry = 0 temp.next = ListNode(tempsum) temp =", "= l1.val + carry if tempsum > 9: carry =", "= temp.next l2 = l2.next if carry: temp.next = ListNode(carry)", "l2.val tempsum += carry if tempsum > 9: carry =", "= 0 temp.next = ListNode(tempsum) temp = temp.next l2 =", "carry = 0 temp.next = ListNode(tempsum) temp = temp.next l2", "= ListNode() while l1 is not None and l2 is", "= l1.next elif l2: while l2: tempsum = l2.val +", "ListNode: carry = 0 out = temp = ListNode() while", "- O(n) ; Space Complexity - O(n) class Solution: def", "l2: tempsum = l2.val + carry if tempsum > 9:", "temp.next = ListNode(tempsum) temp = temp.next l2 = l2.next if", "+ l2.val tempsum += carry if tempsum > 9: carry", "if l1: while l1: tempsum = l1.val + carry if", "= ListNode(tempsum) temp = temp.next l1 = l1.next l2 =", "carry = 0 out = temp = ListNode() while 
l1", "+ carry if tempsum > 9: carry = tempsum//10 tempsum", "# Time Complexity - O(n) ; Space Complexity - O(n)", "is not None: tempsum = l1.val + l2.val tempsum +=", "+= carry if tempsum > 9: carry = tempsum//10 tempsum", "> 9: carry = tempsum//10 tempsum %= 10 else: carry", "ListNode, l2: ListNode) -> ListNode: carry = 0 out =", "else: carry = 0 temp.next = ListNode(tempsum) temp = temp.next", "carry = tempsum//10 tempsum %= 10 else: carry = 0", "l2 = l2.next if l1: while l1: tempsum = l1.val", "= l1.next l2 = l2.next if l1: while l1: tempsum", "Complexity - O(n) ; Space Complexity - O(n) class Solution:", "l1.next l2 = l2.next if l1: while l1: tempsum =", "is not None and l2 is not None: tempsum =", "l2 = l2.next if carry: temp.next = ListNode(carry) return out.next", "l2 is not None: tempsum = l1.val + l2.val tempsum", "elif l2: while l2: tempsum = l2.val + carry if", "addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: carry = 0", "0 out = temp = ListNode() while l1 is not", "tempsum = l2.val + carry if tempsum > 9: carry", "O(n) class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) ->", "tempsum %= 10 else: carry = 0 temp.next = ListNode(tempsum)", "while l2: tempsum = l2.val + carry if tempsum >", "temp.next l1 = l1.next l2 = l2.next if l1: while", "l1: tempsum = l1.val + carry if tempsum > 9:", "l2: while l2: tempsum = l2.val + carry if tempsum", "= tempsum//10 tempsum %= 10 else: carry = 0 temp.next", "0 temp.next = ListNode(tempsum) temp = temp.next l1 = l1.next", "temp.next = ListNode(tempsum) temp = temp.next l1 = l1.next elif", "class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:", "carry = 0 temp.next = ListNode(tempsum) temp = temp.next l1", "%= 10 else: carry = 0 temp.next = ListNode(tempsum) temp", "ListNode) -> ListNode: carry = 0 out = temp =", "tempsum += carry if tempsum > 9: carry = tempsum//10", "= temp.next l1 = l1.next elif l2: while l2: tempsum", "tempsum > 9: carry = tempsum//10 tempsum %= 10 else:", "-> ListNode: carry = 0 out = temp = ListNode()", "l1: while l1: tempsum = l1.val + carry if tempsum", "= l1.val + l2.val tempsum += carry if tempsum >", "not None and l2 is not None: tempsum = l1.val", "l1 is not None and l2 is not None: tempsum", "def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: carry =", "while l1 is not None and l2 is not None:", "ListNode(tempsum) temp = temp.next l2 = l2.next if carry: temp.next" ]
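The solution references ListNode without defining it because LeetCode supplies that class at judging time. The harness below is a hypothetical stand-in for running the code locally; the ListNode definition and the build_list/to_digits helpers are illustrative only, and in a single file the ListNode class would need to appear before the Solution class above.

# Minimal local harness (illustrative, not part of the original submission).
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build_list(digits):
    # Digits are given least-significant first, as in the LeetCode problem.
    head = tail = ListNode(digits[0])
    for d in digits[1:]:
        tail.next = ListNode(d)
        tail = tail.next
    return head

def to_digits(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

# 342 + 465 = 807, stored in reverse order: [2, 4, 3] + [5, 6, 4] -> [7, 0, 8]
result = Solution().addTwoNumbers(build_list([2, 4, 3]), build_list([5, 6, 4]))
print(to_digits(result))  # [7, 0, 8]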
[ "num_samples=50, full_cov=True) plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3) plt.title('Deep Gaussian", "1e-5) return dgp if __name__ == '__main__': Xs, X_train, Y_train,", "dgp.trainable_variables)) print(f\"ELBO: {-objective.numpy()}\") samples, _, _ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True)", "0. if x < 0.5 else 1. Y = np.reshape([f_step(x)", "optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08) for _ in range(1500): with tf.GradientTape(watch_accessed_variables=False)", "plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3) plt.title('Deep Gaussian Process') plt.scatter(X_train,", "tf.random.set_seed(0) def get_data(): Ns = 300 Xs = np.linspace(-0.5, 1.5,", "to be near deterministic for layer in dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt *", "_ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True) plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r',", "layer in dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt * 1e-5) return dgp if __name__", "= np.linspace(-0.5, 1.5, Ns)[:, None] N, M = 50, 25", "kernel = RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5) kernels.append(kernel) layer_sizes.append(1) dgp =", "= tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08) for _ in range(1500): with tf.GradientTape(watch_accessed_variables=False) as", "as plt from gpflow.kernels import White, RBF from gpflow.likelihoods import", "tensorflow as tf import matplotlib.pyplot as plt from gpflow.kernels import", "in range(1500): with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(dgp.trainable_variables) objective = -dgp.elbo((X_train,", "= np.reshape([f_step(x) for x in X], X.shape) + np.random.randn( *X.shape)", "1e-2 return Xs, X, Y, Z def make_deep_GP(num_layers, X, Y,", "numpy as np import tensorflow as tf import matplotlib.pyplot as", "gpflow.likelihoods import Gaussian from deep_gp import DeepGP np.random.seed(0) tf.random.set_seed(0) def", "import matplotlib.pyplot as plt from gpflow.kernels import White, RBF from", "1, M)[:, None] f_step = lambda x: 0. 
if x", "def make_deep_GP(num_layers, X, Y, Z): kernels = [] layer_sizes =", "M = 50, 25 X = np.random.uniform(0, 1, N)[:, None]", "gradients = tape.gradient(objective, dgp.trainable_variables) optimizer.apply_gradients(zip(gradients, dgp.trainable_variables)) print(f\"ELBO: {-objective.numpy()}\") samples, _,", "print(f\"ELBO: {-objective.numpy()}\") samples, _, _ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True) plt.plot(Xs,", "* 1e-5) return dgp if __name__ == '__main__': Xs, X_train,", "dgp.predict_all_layers(Xs, num_samples=50, full_cov=True) plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3) plt.title('Deep", "layer_sizes = [] for l in range(num_layers): kernel = RBF(lengthscales=0.2,", "np import tensorflow as tf import matplotlib.pyplot as plt from", "np.random.seed(0) tf.random.set_seed(0) def get_data(): Ns = 300 Xs = np.linspace(-0.5,", "Gaussian(), num_samples=100) # init hidden layers to be near deterministic", "epsilon=1e-08) for _ in range(1500): with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(dgp.trainable_variables)", "X, Y, Z): kernels = [] layer_sizes = [] for", "DeepGP np.random.seed(0) tf.random.set_seed(0) def get_data(): Ns = 300 Xs =", "X = np.random.uniform(0, 1, N)[:, None] Z = np.random.uniform(0, 1,", "get_data(): Ns = 300 Xs = np.linspace(-0.5, 1.5, Ns)[:, None]", "tape.gradient(objective, dgp.trainable_variables) optimizer.apply_gradients(zip(gradients, dgp.trainable_variables)) print(f\"ELBO: {-objective.numpy()}\") samples, _, _ =", "from deep_gp import DeepGP np.random.seed(0) tf.random.set_seed(0) def get_data(): Ns =", "tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(dgp.trainable_variables) objective = -dgp.elbo((X_train, Y_train)) gradients =", "_ in range(1500): with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(dgp.trainable_variables) objective =", "N, M = 50, 25 X = np.random.uniform(0, 1, N)[:,", "X_train, Y_train, Z) optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08) for _ in", "dgp = DeepGP(X, Y, Z, kernels, layer_sizes, Gaussian(), num_samples=100) #", "dgp = make_deep_GP(3, X_train, Y_train, Z) optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08)", "be near deterministic for layer in dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt * 1e-5)", "else 1. Y = np.reshape([f_step(x) for x in X], X.shape)", "X], X.shape) + np.random.randn( *X.shape) * 1e-2 return Xs, X,", "range(num_layers): kernel = RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5) kernels.append(kernel) layer_sizes.append(1) dgp", "optimizer.apply_gradients(zip(gradients, dgp.trainable_variables)) print(f\"ELBO: {-objective.numpy()}\") samples, _, _ = dgp.predict_all_layers(Xs, num_samples=50,", "deep_gp import DeepGP np.random.seed(0) tf.random.set_seed(0) def get_data(): Ns = 300", "1. 
Y = np.reshape([f_step(x) for x in X], X.shape) +", "tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08) for _ in range(1500): with tf.GradientTape(watch_accessed_variables=False) as tape:", "25 X = np.random.uniform(0, 1, N)[:, None] Z = np.random.uniform(0,", "layer.q_sqrt.assign(layer.q_sqrt * 1e-5) return dgp if __name__ == '__main__': Xs,", "import Gaussian from deep_gp import DeepGP np.random.seed(0) tf.random.set_seed(0) def get_data():", "as np import tensorflow as tf import matplotlib.pyplot as plt", "for x in X], X.shape) + np.random.randn( *X.shape) * 1e-2", "range(1500): with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(dgp.trainable_variables) objective = -dgp.elbo((X_train, Y_train))", "np.random.uniform(0, 1, M)[:, None] f_step = lambda x: 0. if", "for l in range(num_layers): kernel = RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5)", "num_samples=100) # init hidden layers to be near deterministic for", "import White, RBF from gpflow.likelihoods import Gaussian from deep_gp import", "hidden layers to be near deterministic for layer in dgp.layers[:-1]:", "Y, Z, kernels, layer_sizes, Gaussian(), num_samples=100) # init hidden layers", "layer_sizes, Gaussian(), num_samples=100) # init hidden layers to be near", "RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5) kernels.append(kernel) layer_sizes.append(1) dgp = DeepGP(X, Y,", "objective = -dgp.elbo((X_train, Y_train)) gradients = tape.gradient(objective, dgp.trainable_variables) optimizer.apply_gradients(zip(gradients, dgp.trainable_variables))", "Y_train)) gradients = tape.gradient(objective, dgp.trainable_variables) optimizer.apply_gradients(zip(gradients, dgp.trainable_variables)) print(f\"ELBO: {-objective.numpy()}\") samples,", "= tape.gradient(objective, dgp.trainable_variables) optimizer.apply_gradients(zip(gradients, dgp.trainable_variables)) print(f\"ELBO: {-objective.numpy()}\") samples, _, _", "_, _ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True) plt.plot(Xs, samples[-1].numpy()[:, :, 0].T,", "import tensorflow as tf import matplotlib.pyplot as plt from gpflow.kernels", "full_cov=True) plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3) plt.title('Deep Gaussian Process')", "in range(num_layers): kernel = RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5) kernels.append(kernel) layer_sizes.append(1)", "Z = get_data() dgp = make_deep_GP(3, X_train, Y_train, Z) optimizer", "Y, Z def make_deep_GP(num_layers, X, Y, Z): kernels = []", "1.5, Ns)[:, None] N, M = 50, 25 X =", "= -dgp.elbo((X_train, Y_train)) gradients = tape.gradient(objective, dgp.trainable_variables) optimizer.apply_gradients(zip(gradients, dgp.trainable_variables)) print(f\"ELBO:", "in X], X.shape) + np.random.randn( *X.shape) * 1e-2 return Xs,", "White(variance=1e-5) kernels.append(kernel) layer_sizes.append(1) dgp = DeepGP(X, Y, Z, kernels, layer_sizes,", "np.reshape([f_step(x) for x in X], X.shape) + np.random.randn( *X.shape) *", "dgp.trainable_variables) optimizer.apply_gradients(zip(gradients, dgp.trainable_variables)) print(f\"ELBO: {-objective.numpy()}\") samples, _, _ = dgp.predict_all_layers(Xs,", "__name__ == '__main__': Xs, X_train, Y_train, Z = get_data() dgp", "get_data() dgp = make_deep_GP(3, X_train, Y_train, Z) optimizer = tf.optimizers.Adam(learning_rate=0.01,", "Y_train, Z) optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08) for _ in range(1500):", "make_deep_GP(num_layers, X, Y, Z): kernels = [] layer_sizes = []", "* 1e-2 return Xs, X, Y, Z def 
make_deep_GP(num_layers, X,", "make_deep_GP(3, X_train, Y_train, Z) optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08) for _", "with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(dgp.trainable_variables) objective = -dgp.elbo((X_train, Y_train)) gradients", "Gaussian from deep_gp import DeepGP np.random.seed(0) tf.random.set_seed(0) def get_data(): Ns", "1, N)[:, None] Z = np.random.uniform(0, 1, M)[:, None] f_step", "X.shape) + np.random.randn( *X.shape) * 1e-2 return Xs, X, Y,", "dgp if __name__ == '__main__': Xs, X_train, Y_train, Z =", "gpflow.kernels import White, RBF from gpflow.likelihoods import Gaussian from deep_gp", "np.random.randn( *X.shape) * 1e-2 return Xs, X, Y, Z def", "samples, _, _ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True) plt.plot(Xs, samples[-1].numpy()[:, :,", "import DeepGP np.random.seed(0) tf.random.set_seed(0) def get_data(): Ns = 300 Xs", "np.linspace(-0.5, 1.5, Ns)[:, None] N, M = 50, 25 X", "Xs, X_train, Y_train, Z = get_data() dgp = make_deep_GP(3, X_train,", "import numpy as np import tensorflow as tf import matplotlib.pyplot", "-dgp.elbo((X_train, Y_train)) gradients = tape.gradient(objective, dgp.trainable_variables) optimizer.apply_gradients(zip(gradients, dgp.trainable_variables)) print(f\"ELBO: {-objective.numpy()}\")", "tape.watch(dgp.trainable_variables) objective = -dgp.elbo((X_train, Y_train)) gradients = tape.gradient(objective, dgp.trainable_variables) optimizer.apply_gradients(zip(gradients,", "<reponame>dks28/Deep-Gaussian-Process import numpy as np import tensorflow as tf import", "Z) optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08) for _ in range(1500): with", "N)[:, None] Z = np.random.uniform(0, 1, M)[:, None] f_step =", "'__main__': Xs, X_train, Y_train, Z = get_data() dgp = make_deep_GP(3,", "from gpflow.likelihoods import Gaussian from deep_gp import DeepGP np.random.seed(0) tf.random.set_seed(0)", "x < 0.5 else 1. Y = np.reshape([f_step(x) for x", "Xs = np.linspace(-0.5, 1.5, Ns)[:, None] N, M = 50,", "tf import matplotlib.pyplot as plt from gpflow.kernels import White, RBF", "+ np.random.randn( *X.shape) * 1e-2 return Xs, X, Y, Z", "Z): kernels = [] layer_sizes = [] for l in", "variance=1.0) + White(variance=1e-5) kernels.append(kernel) layer_sizes.append(1) dgp = DeepGP(X, Y, Z,", "0.5 else 1. Y = np.reshape([f_step(x) for x in X],", "None] Z = np.random.uniform(0, 1, M)[:, None] f_step = lambda", "= np.random.uniform(0, 1, N)[:, None] Z = np.random.uniform(0, 1, M)[:,", "= [] layer_sizes = [] for l in range(num_layers): kernel", "[] for l in range(num_layers): kernel = RBF(lengthscales=0.2, variance=1.0) +", "Z def make_deep_GP(num_layers, X, Y, Z): kernels = [] layer_sizes", "White, RBF from gpflow.likelihoods import Gaussian from deep_gp import DeepGP", "Z = np.random.uniform(0, 1, M)[:, None] f_step = lambda x:", "if x < 0.5 else 1. Y = np.reshape([f_step(x) for", "f_step = lambda x: 0. if x < 0.5 else", "*X.shape) * 1e-2 return Xs, X, Y, Z def make_deep_GP(num_layers,", "init hidden layers to be near deterministic for layer in", "deterministic for layer in dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt * 1e-5) return dgp", "Ns)[:, None] N, M = 50, 25 X = np.random.uniform(0,", "samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3) plt.title('Deep Gaussian Process') plt.scatter(X_train, Y_train)", "x: 0. if x < 0.5 else 1. 
Y =", "kernels, layer_sizes, Gaussian(), num_samples=100) # init hidden layers to be", "in dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt * 1e-5) return dgp if __name__ ==", "layers to be near deterministic for layer in dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt", "layer_sizes.append(1) dgp = DeepGP(X, Y, Z, kernels, layer_sizes, Gaussian(), num_samples=100)", "for layer in dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt * 1e-5) return dgp if", "M)[:, None] f_step = lambda x: 0. if x <", "plt from gpflow.kernels import White, RBF from gpflow.likelihoods import Gaussian", "= DeepGP(X, Y, Z, kernels, layer_sizes, Gaussian(), num_samples=100) # init", "return dgp if __name__ == '__main__': Xs, X_train, Y_train, Z", "[] layer_sizes = [] for l in range(num_layers): kernel =", "None] N, M = 50, 25 X = np.random.uniform(0, 1,", "Z, kernels, layer_sizes, Gaussian(), num_samples=100) # init hidden layers to", "as tf import matplotlib.pyplot as plt from gpflow.kernels import White,", "Y_train, Z = get_data() dgp = make_deep_GP(3, X_train, Y_train, Z)", "None] f_step = lambda x: 0. if x < 0.5", "RBF from gpflow.likelihoods import Gaussian from deep_gp import DeepGP np.random.seed(0)", "Y, Z): kernels = [] layer_sizes = [] for l", "tape: tape.watch(dgp.trainable_variables) objective = -dgp.elbo((X_train, Y_train)) gradients = tape.gradient(objective, dgp.trainable_variables)", "l in range(num_layers): kernel = RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5) kernels.append(kernel)", "== '__main__': Xs, X_train, Y_train, Z = get_data() dgp =", "as tape: tape.watch(dgp.trainable_variables) objective = -dgp.elbo((X_train, Y_train)) gradients = tape.gradient(objective,", "def get_data(): Ns = 300 Xs = np.linspace(-0.5, 1.5, Ns)[:,", "= 50, 25 X = np.random.uniform(0, 1, N)[:, None] Z", "= make_deep_GP(3, X_train, Y_train, Z) optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-08) for", "if __name__ == '__main__': Xs, X_train, Y_train, Z = get_data()", "for _ in range(1500): with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(dgp.trainable_variables) objective", "X_train, Y_train, Z = get_data() dgp = make_deep_GP(3, X_train, Y_train,", "{-objective.numpy()}\") samples, _, _ = dgp.predict_all_layers(Xs, num_samples=50, full_cov=True) plt.plot(Xs, samples[-1].numpy()[:,", "return Xs, X, Y, Z def make_deep_GP(num_layers, X, Y, Z):", "= 300 Xs = np.linspace(-0.5, 1.5, Ns)[:, None] N, M", "50, 25 X = np.random.uniform(0, 1, N)[:, None] Z =", "< 0.5 else 1. 
Y = np.reshape([f_step(x) for x in", "300 Xs = np.linspace(-0.5, 1.5, Ns)[:, None] N, M =", "+ White(variance=1e-5) kernels.append(kernel) layer_sizes.append(1) dgp = DeepGP(X, Y, Z, kernels,", "= get_data() dgp = make_deep_GP(3, X_train, Y_train, Z) optimizer =", "= [] for l in range(num_layers): kernel = RBF(lengthscales=0.2, variance=1.0)", "kernels = [] layer_sizes = [] for l in range(num_layers):", "# init hidden layers to be near deterministic for layer", "np.random.uniform(0, 1, N)[:, None] Z = np.random.uniform(0, 1, M)[:, None]", "Y = np.reshape([f_step(x) for x in X], X.shape) + np.random.randn(", "from gpflow.kernels import White, RBF from gpflow.likelihoods import Gaussian from", "Ns = 300 Xs = np.linspace(-0.5, 1.5, Ns)[:, None] N,", "kernels.append(kernel) layer_sizes.append(1) dgp = DeepGP(X, Y, Z, kernels, layer_sizes, Gaussian(),", "near deterministic for layer in dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt * 1e-5) return", "dgp.layers[:-1]: layer.q_sqrt.assign(layer.q_sqrt * 1e-5) return dgp if __name__ == '__main__':", "matplotlib.pyplot as plt from gpflow.kernels import White, RBF from gpflow.likelihoods", "= dgp.predict_all_layers(Xs, num_samples=50, full_cov=True) plt.plot(Xs, samples[-1].numpy()[:, :, 0].T, color='r', alpha=0.3)", "x in X], X.shape) + np.random.randn( *X.shape) * 1e-2 return", "Xs, X, Y, Z def make_deep_GP(num_layers, X, Y, Z): kernels", "X, Y, Z def make_deep_GP(num_layers, X, Y, Z): kernels =", "= RBF(lengthscales=0.2, variance=1.0) + White(variance=1e-5) kernels.append(kernel) layer_sizes.append(1) dgp = DeepGP(X,", "DeepGP(X, Y, Z, kernels, layer_sizes, Gaussian(), num_samples=100) # init hidden", "= np.random.uniform(0, 1, M)[:, None] f_step = lambda x: 0.", "lambda x: 0. if x < 0.5 else 1. Y", ":, 0].T, color='r', alpha=0.3) plt.title('Deep Gaussian Process') plt.scatter(X_train, Y_train) plt.show()", "= lambda x: 0. if x < 0.5 else 1." ]
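make_deep_GP scales each hidden layer's q_sqrt by 1e-5 so the inner layers start out near-deterministic. Since q_sqrt acts as a Cholesky-style factor, scaling it by s scales the implied covariance q_sqrt @ q_sqrt.T by s**2. The standalone numpy sketch below (illustrative only, no gpflow involved) makes that concrete.

# Standalone illustration of why multiplying q_sqrt by 1e-5 collapses the
# initial posterior variance by a factor of 1e-10.
import numpy as np

rng = np.random.default_rng(0)
L = np.tril(rng.normal(size=(3, 3)))        # stand-in for one layer's q_sqrt
cov_before = L @ L.T
cov_after = (1e-5 * L) @ (1e-5 * L).T

print(np.allclose(cov_after, 1e-10 * cov_before))  # True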
[ "# If Lithops' storage supported Copy Object operations, this could", "use_db_mutex=True, ): moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs) with ExitStack()", "import logging from contextlib import contextmanager, ExitStack from typing import", "operations, this could be easily optimized. # Not sure if", "# https://github.com/python/mypy/issues/4122 # Include the `targeted` value of databases so", "(peaks_cobjs, f'{self.prefix}/peaks')], runtime_memory=1024, ) # Save config in case it's", "to copy. save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key) return new_db_data_cobjs, new_peaks_cobjs", "= sm_storage['centroids'] self.prefix = f\"{raw_prefix}/{self.ds_hash}\" self.config_key = f'{self.prefix}/ds_config.json' self.meta_key =", "Save list of cobjects. This list would be easy to", "new_db_data_cobjs, new_peaks_cobjs = self.executor.map( batch_copy, [(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')], runtime_memory=1024,", "# Include the `targeted` value of databases so that a", "the last step of the process is helpful to confirm", "if use_db_mutex: stack.enter_context(moldb_cache.lock()) if use_cache: cached_val = moldb_cache.load() else: cached_val", "executor.storage self.bucket, raw_prefix = sm_storage['centroids'] self.prefix = f\"{raw_prefix}/{self.ds_hash}\" self.config_key =", "cached_val = moldb_cache.load() else: cached_val = None moldb_cache.clear() if cached_val:", "with DBMutex().lock(self.ds_hash, timeout=3600): yield def load(self): try: db_data_cobjs, peaks_cobjs =", "be easy to reconstruct by listing keys, but # saving", "logger.info(f'Clearing centroids cache {self.prefix}') self.storage.delete_objects(self.bucket, keys) def get_moldb_centroids( executor: Executor,", "__future__ import annotations import json import logging from contextlib import", "fail to copy. save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key) return new_db_data_cobjs,", "keys, but # saving a separate object as the last", "import List, Dict import pandas as pd from lithops.storage import", "of the process is helpful to confirm that # the", "from sm.engine.annotation_lithops.calculate_centroids import ( calculate_centroids, validate_centroids, ) from sm.engine.annotation_lithops.executor import", "be easily optimized. 
# Not sure if it's worth the", "from typing import List, Dict import pandas as pd from", "if keys: logger.info(f'Clearing centroids cache {self.prefix}') self.storage.delete_objects(self.bucket, keys) def get_moldb_centroids(", "cache item is complete, and didn't partially fail to copy.", "= [] for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)): dest_key =", "lock(self): with DBMutex().lock(self.ds_hash, timeout=3600): yield def load(self): try: db_data_cobjs, peaks_cobjs", "self.meta_key) return new_db_data_cobjs, new_peaks_cobjs def clear(self): keys = self.storage.list_keys(self.bucket, self.prefix)", "deserialize, ) from sm.engine.annotation_lithops.utils import jsonhash from sm.engine.utils.db_mutex import DBMutex", "as stack: if use_db_mutex: stack.enter_context(moldb_cache.lock()) if use_cache: cached_val = moldb_cache.load()", "last step of the process is helpful to confirm that", "json import logging from contextlib import contextmanager, ExitStack from typing", "(new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key) return new_db_data_cobjs, new_peaks_cobjs def clear(self): keys", "self.executor.map( batch_copy, [(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')], runtime_memory=1024, ) # Save", "if # someone manually changes that field 'databases': [(moldb['id'], moldb['targeted'])", ") from sm.engine.annotation_lithops.calculate_centroids import ( calculate_centroids, validate_centroids, ) from sm.engine.annotation_lithops.executor", "Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb], debug_validate=False, use_cache=True, use_db_mutex=True,", "dest_prefix: str, *, storage: Storage): # If Lithops' storage supported", "import pandas as pd from lithops.storage import Storage from lithops.storage.utils", "db_data_cobjs, peaks_cobjs except StorageNoSuchKeyError: return None def save(self, db_data_cobjs: List[CObj[DbFDRData]],", "logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to cache') return db_data_cobjs,", "else: cached_val = None moldb_cache.clear() if cached_val: db_data_cobjs, peaks_cobjs =", "= build_moldb(executor, ds_config, moldbs) isocalc_wrapper = IsocalcWrapper(ds_config) peaks_cobjs = calculate_centroids(executor,", "https://github.com/python/mypy/issues/4122 # Include the `targeted` value of databases so that", "None moldb_cache.clear() if cached_val: db_data_cobjs, peaks_cobjs = cached_val logger.info( f'Loaded", "cached_val logger.info( f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from cache'", "list of cobjects. 
This list would be easy to reconstruct", "contextmanager, ExitStack from typing import List, Dict import pandas as", "DSConfig from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper logger = logging.getLogger('annotation-pipeline') class CentroidsCacheEntry:", ") # Save config in case it's needed for debugging", "self.ds_config = { **ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122 #", "sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb], debug_validate=False, use_cache=True, use_db_mutex=True, ):", "} # Remove database_ids as it may be in a", "may be in a different order to moldbs del self.ds_config['database_ids']", "would be easy to reconstruct by listing keys, but #", "json.dumps(self.ds_config, indent=4), self.bucket, self.config_key ) # Save list of cobjects.", "ds_config.copy() self.ds_config = { **ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122", "List[CObj[pd.DataFrame]]): def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage: Storage): #", "cached_val = None moldb_cache.clear() if cached_val: db_data_cobjs, peaks_cobjs = cached_val", "peak segms from cache' ) else: formula_cobjs, db_data_cobjs = build_moldb(executor,", "by listing keys, but # saving a separate object as", "cache {self.prefix}') self.storage.delete_objects(self.bucket, keys) def get_moldb_centroids( executor: Executor, sm_storage: Dict,", "list would be easy to reconstruct by listing keys, but", "with ExitStack() as stack: if use_db_mutex: stack.enter_context(moldb_cache.lock()) if use_cache: cached_val", "CObj, save_cobj, iter_cobjects_with_prefetch, deserialize, ) from sm.engine.annotation_lithops.utils import jsonhash from", "= f'{self.prefix}/meta' @contextmanager def lock(self): with DBMutex().lock(self.ds_hash, timeout=3600): yield def", "return new_db_data_cobjs, new_peaks_cobjs def clear(self): keys = self.storage.list_keys(self.bucket, self.prefix) if", "peaks_cobjs = cached_val logger.info( f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms", "listing keys, but # saving a separate object as the", "batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage: Storage): # If Lithops'", "that # the cache item is complete, and didn't partially", "segms from cache' ) else: formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config,", "Storage from lithops.storage.utils import CloudObject, StorageNoSuchKeyError from sm.engine.annotation_lithops.build_moldb import (", "sm.engine.utils.db_mutex import DBMutex from sm.engine.ds_config import DSConfig from sm.engine.annotation.isocalc_wrapper import", "in moldbs], } # Remove database_ids as it may be", "stack.enter_context(moldb_cache.lock()) if use_cache: cached_val = moldb_cache.load() else: cached_val = None", "process is helpful to confirm that # the cache item", "self.ds_config['database_ids'] self.ds_hash = jsonhash(self.ds_config) self.executor = executor self.storage = executor.storage", "raw_prefix = sm_storage['centroids'] self.prefix = f\"{raw_prefix}/{self.ds_hash}\" self.config_key = f'{self.prefix}/ds_config.json' self.meta_key", "cache entry is made if # someone manually changes that", "needed for debugging self.storage.put_cloudobject( json.dumps(self.ds_config, indent=4), self.bucket, self.config_key ) #", "item is complete, and didn't partially fail to copy. 
save_cobj(self.storage,", "CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs) with ExitStack() as stack: if use_db_mutex:", "StorageNoSuchKeyError from sm.engine.annotation_lithops.build_moldb import ( build_moldb, InputMolDb, DbFDRData, ) from", "separate object as the last step of the process is", "= None moldb_cache.clear() if cached_val: db_data_cobjs, peaks_cobjs = cached_val logger.info(", "self.storage.delete_objects(self.bucket, keys) def get_moldb_centroids( executor: Executor, sm_storage: Dict, ds_config: DSConfig,", "sm.engine.annotation_lithops.io import ( CObj, save_cobj, iter_cobjects_with_prefetch, deserialize, ) from sm.engine.annotation_lithops.utils", "validate_centroids, ) from sm.engine.annotation_lithops.executor import Executor from sm.engine.annotation_lithops.io import (", "complete, and didn't partially fail to copy. save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs),", "of databases so that a new cache entry is made", "result_cobjs = [] for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)): dest_key", "in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)): dest_key = f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key)) return", "stack: if use_db_mutex: stack.enter_context(moldb_cache.lock()) if use_cache: cached_val = moldb_cache.load() else:", "enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)): dest_key = f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key)) return result_cobjs", "sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb] ): ds_hash_params = ds_config.copy()", "use_cache: cached_val = moldb_cache.load() else: cached_val = None moldb_cache.clear() if", "to reconstruct by listing keys, but # saving a separate", "Copy Object operations, this could be easily optimized. # Not", "of cobjects. This list would be easy to reconstruct by", "manually changes that field 'databases': [(moldb['id'], moldb['targeted']) for moldb in", "type: ignore # https://github.com/python/mypy/issues/4122 # Include the `targeted` value of", "f\"{raw_prefix}/{self.ds_hash}\" self.config_key = f'{self.prefix}/ds_config.json' self.meta_key = f'{self.prefix}/meta' @contextmanager def lock(self):", "**ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122 # Include the `targeted`", "worth the effort yet result_cobjs = [] for i, data", "executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb] ): ds_hash_params", "config in case it's needed for debugging self.storage.put_cloudobject( json.dumps(self.ds_config, indent=4),", "peaks_cobjs: List[CObj[pd.DataFrame]]): def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage: Storage):", "self.storage.put_cloudobject( json.dumps(self.ds_config, indent=4), self.bucket, self.config_key ) # Save list of", "partially fail to copy. 
save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key) return", "dest_bucket, dest_key)) return result_cobjs dest_bucket = self.bucket # Copy cobjs", "= self.executor.map( batch_copy, [(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')], runtime_memory=1024, ) #", "changes that field 'databases': [(moldb['id'], moldb['targeted']) for moldb in moldbs],", "def load(self): try: db_data_cobjs, peaks_cobjs = deserialize( self.storage.get_object(self.bucket, self.meta_key) )", "= self.storage.list_keys(self.bucket, self.prefix) if keys: logger.info(f'Clearing centroids cache {self.prefix}') self.storage.delete_objects(self.bucket,", "# Save list of cobjects. This list would be easy", "if cached_val: db_data_cobjs, peaks_cobjs = cached_val logger.info( f'Loaded {len(db_data_cobjs)} DBs,", "keys) def get_moldb_centroids( executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs:", "from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper logger = logging.getLogger('annotation-pipeline') class CentroidsCacheEntry: def", "__init__( self, executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb]", "indent=4), self.bucket, self.config_key ) # Save list of cobjects. This", "step of the process is helpful to confirm that #", "result_cobjs dest_bucket = self.bucket # Copy cobjs to the cache", "ds_hash_params = ds_config.copy() self.ds_config = { **ds_hash_params, # type: ignore", "self.meta_key = f'{self.prefix}/meta' @contextmanager def lock(self): with DBMutex().lock(self.ds_hash, timeout=3600): yield", "from sm.engine.utils.db_mutex import DBMutex from sm.engine.ds_config import DSConfig from sm.engine.annotation.isocalc_wrapper", "for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)): dest_key = f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data,", "import ( CObj, save_cobj, iter_cobjects_with_prefetch, deserialize, ) from sm.engine.annotation_lithops.utils import", "sm_storage['centroids'] self.prefix = f\"{raw_prefix}/{self.ds_hash}\" self.config_key = f'{self.prefix}/ds_config.json' self.meta_key = f'{self.prefix}/meta'", "moldbs: List[InputMolDb], debug_validate=False, use_cache=True, use_db_mutex=True, ): moldb_cache = CentroidsCacheEntry(executor, sm_storage,", "annotations import json import logging from contextlib import contextmanager, ExitStack", "in case it's needed for debugging self.storage.put_cloudobject( json.dumps(self.ds_config, indent=4), self.bucket,", "db_data_cobjs, peaks_cobjs = deserialize( self.storage.get_object(self.bucket, self.meta_key) ) return db_data_cobjs, peaks_cobjs", "lithops.storage import Storage from lithops.storage.utils import CloudObject, StorageNoSuchKeyError from sm.engine.annotation_lithops.build_moldb", "Dict import pandas as pd from lithops.storage import Storage from", "to moldbs del self.ds_config['database_ids'] self.ds_hash = jsonhash(self.ds_config) self.executor = executor", "Copy cobjs to the cache dir new_db_data_cobjs, new_peaks_cobjs = self.executor.map(", "is helpful to confirm that # the cache item is", "cache' ) else: formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs) isocalc_wrapper", "# the cache item is complete, and didn't partially fail", "runtime_memory=1024, ) # Save config in case it's needed for", "db_data_cobjs = build_moldb(executor, ds_config, moldbs) isocalc_wrapper = IsocalcWrapper(ds_config) peaks_cobjs =", "def __init__( self, executor: Executor, sm_storage: 
Dict, ds_config: DSConfig, moldbs:", "easily optimized. # Not sure if it's worth the effort", "self.bucket, self.config_key ) # Save list of cobjects. This list", "return None def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]): def batch_copy(src_cobjs:", "else: formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs) isocalc_wrapper = IsocalcWrapper(ds_config)", "= deserialize( self.storage.get_object(self.bucket, self.meta_key) ) return db_data_cobjs, peaks_cobjs except StorageNoSuchKeyError:", "sm.engine.annotation_lithops.utils import jsonhash from sm.engine.utils.db_mutex import DBMutex from sm.engine.ds_config import", "pandas as pd from lithops.storage import Storage from lithops.storage.utils import", "new cache entry is made if # someone manually changes", "peaks_cobjs except StorageNoSuchKeyError: return None def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs:", "= jsonhash(self.ds_config) self.executor = executor self.storage = executor.storage self.bucket, raw_prefix", "[(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')], runtime_memory=1024, ) # Save config in", "is complete, and didn't partially fail to copy. save_cobj(self.storage, (new_db_data_cobjs,", "calculate_centroids(executor, formula_cobjs, isocalc_wrapper) if debug_validate: validate_centroids(executor, peaks_cobjs) moldb_cache.save(db_data_cobjs, peaks_cobjs) logger.info(f'Saved", "contextlib import contextmanager, ExitStack from typing import List, Dict import", "result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key)) return result_cobjs dest_bucket = self.bucket # Copy", "del self.ds_config['database_ids'] self.ds_hash = jsonhash(self.ds_config) self.executor = executor self.storage =", "different order to moldbs del self.ds_config['database_ids'] self.ds_hash = jsonhash(self.ds_config) self.executor", "import Executor from sm.engine.annotation_lithops.io import ( CObj, save_cobj, iter_cobjects_with_prefetch, deserialize,", "): ds_hash_params = ds_config.copy() self.ds_config = { **ds_hash_params, # type:", "for debugging self.storage.put_cloudobject( json.dumps(self.ds_config, indent=4), self.bucket, self.config_key ) # Save", "db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]): def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *,", "object as the last step of the process is helpful", "new_peaks_cobjs = self.executor.map( batch_copy, [(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')], runtime_memory=1024, )", "Object operations, this could be easily optimized. 
# Not sure", "isocalc_wrapper) if debug_validate: validate_centroids(executor, peaks_cobjs) moldb_cache.save(db_data_cobjs, peaks_cobjs) logger.info(f'Saved {len(db_data_cobjs)} DBs,", "saving a separate object as the last step of the", "if it's worth the effort yet result_cobjs = [] for", "if use_cache: cached_val = moldb_cache.load() else: cached_val = None moldb_cache.clear()", "ds_config, moldbs) with ExitStack() as stack: if use_db_mutex: stack.enter_context(moldb_cache.lock()) if", "easy to reconstruct by listing keys, but # saving a", "Include the `targeted` value of databases so that a new", "dest_bucket = self.bucket # Copy cobjs to the cache dir", "clear(self): keys = self.storage.list_keys(self.bucket, self.prefix) if keys: logger.info(f'Clearing centroids cache", "use_db_mutex: stack.enter_context(moldb_cache.lock()) if use_cache: cached_val = moldb_cache.load() else: cached_val =", "copy. save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key) return new_db_data_cobjs, new_peaks_cobjs def", "`targeted` value of databases so that a new cache entry", "use_cache=True, use_db_mutex=True, ): moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs) with", "import CloudObject, StorageNoSuchKeyError from sm.engine.annotation_lithops.build_moldb import ( build_moldb, InputMolDb, DbFDRData,", "logger = logging.getLogger('annotation-pipeline') class CentroidsCacheEntry: def __init__( self, executor: Executor,", "formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs) isocalc_wrapper = IsocalcWrapper(ds_config) peaks_cobjs", "def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage: Storage): # If", "moldbs) with ExitStack() as stack: if use_db_mutex: stack.enter_context(moldb_cache.lock()) if use_cache:", "# Save config in case it's needed for debugging self.storage.put_cloudobject(", "sm.engine.annotation_lithops.calculate_centroids import ( calculate_centroids, validate_centroids, ) from sm.engine.annotation_lithops.executor import Executor", "debug_validate: validate_centroids(executor, peaks_cobjs) moldb_cache.save(db_data_cobjs, peaks_cobjs) logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak", "import jsonhash from sm.engine.utils.db_mutex import DBMutex from sm.engine.ds_config import DSConfig", "# Remove database_ids as it may be in a different", "str, *, storage: Storage): # If Lithops' storage supported Copy", "{len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from cache' ) else: formula_cobjs,", "sure if it's worth the effort yet result_cobjs = []", "lithops.storage.utils import CloudObject, StorageNoSuchKeyError from sm.engine.annotation_lithops.build_moldb import ( build_moldb, InputMolDb,", "save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key) return new_db_data_cobjs, new_peaks_cobjs def clear(self):", "ignore # https://github.com/python/mypy/issues/4122 # Include the `targeted` value of databases", "dest_key = f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key)) return result_cobjs dest_bucket =", "the cache dir new_db_data_cobjs, new_peaks_cobjs = self.executor.map( batch_copy, [(db_data_cobjs, f'{self.prefix}/db_data'),", "as the last step of the process is helpful to", "the `targeted` value of databases so that a new cache", "InputMolDb, DbFDRData, ) from sm.engine.annotation_lithops.calculate_centroids import ( calculate_centroids, validate_centroids, )", "cobjects. 
This list would be easy to reconstruct by listing", "entry is made if # someone manually changes that field", "ds_config, moldbs) isocalc_wrapper = IsocalcWrapper(ds_config) peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper)", "executor self.storage = executor.storage self.bucket, raw_prefix = sm_storage['centroids'] self.prefix =", "from lithops.storage import Storage from lithops.storage.utils import CloudObject, StorageNoSuchKeyError from", "f'{self.prefix}/ds_config.json' self.meta_key = f'{self.prefix}/meta' @contextmanager def lock(self): with DBMutex().lock(self.ds_hash, timeout=3600):", ") # Save list of cobjects. This list would be", "from sm.engine.annotation_lithops.utils import jsonhash from sm.engine.utils.db_mutex import DBMutex from sm.engine.ds_config", "but # saving a separate object as the last step", "moldbs], } # Remove database_ids as it may be in", "List, Dict import pandas as pd from lithops.storage import Storage", "import contextmanager, ExitStack from typing import List, Dict import pandas", ") from sm.engine.annotation_lithops.utils import jsonhash from sm.engine.utils.db_mutex import DBMutex from", "DBs, {len(peaks_cobjs)} peak segms from cache' ) else: formula_cobjs, db_data_cobjs", "StorageNoSuchKeyError: return None def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]): def", "order to moldbs del self.ds_config['database_ids'] self.ds_hash = jsonhash(self.ds_config) self.executor =", "sm.engine.annotation_lithops.build_moldb import ( build_moldb, InputMolDb, DbFDRData, ) from sm.engine.annotation_lithops.calculate_centroids import", "build_moldb, InputMolDb, DbFDRData, ) from sm.engine.annotation_lithops.calculate_centroids import ( calculate_centroids, validate_centroids,", "moldb_cache.clear() if cached_val: db_data_cobjs, peaks_cobjs = cached_val logger.info( f'Loaded {len(db_data_cobjs)}", "self.meta_key) ) return db_data_cobjs, peaks_cobjs except StorageNoSuchKeyError: return None def", "moldb_cache.load() else: cached_val = None moldb_cache.clear() if cached_val: db_data_cobjs, peaks_cobjs", "return db_data_cobjs, peaks_cobjs except StorageNoSuchKeyError: return None def save(self, db_data_cobjs:", "from sm.engine.annotation_lithops.executor import Executor from sm.engine.annotation_lithops.io import ( CObj, save_cobj,", ") else: formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs) isocalc_wrapper =", "new_peaks_cobjs def clear(self): keys = self.storage.list_keys(self.bucket, self.prefix) if keys: logger.info(f'Clearing", "data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)): dest_key = f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key))", "{self.prefix}') self.storage.delete_objects(self.bucket, keys) def get_moldb_centroids( executor: Executor, sm_storage: Dict, ds_config:", "didn't partially fail to copy. 
save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket, self.meta_key)", "= f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key)) return result_cobjs dest_bucket = self.bucket", "from sm.engine.annotation_lithops.io import ( CObj, save_cobj, iter_cobjects_with_prefetch, deserialize, ) from", "sm.engine.ds_config import DSConfig from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper logger = logging.getLogger('annotation-pipeline')", "Save config in case it's needed for debugging self.storage.put_cloudobject( json.dumps(self.ds_config,", "Executor from sm.engine.annotation_lithops.io import ( CObj, save_cobj, iter_cobjects_with_prefetch, deserialize, )", "CentroidsCacheEntry: def __init__( self, executor: Executor, sm_storage: Dict, ds_config: DSConfig,", "from cache' ) else: formula_cobjs, db_data_cobjs = build_moldb(executor, ds_config, moldbs)", "self.storage.get_object(self.bucket, self.meta_key) ) return db_data_cobjs, peaks_cobjs except StorageNoSuchKeyError: return None", "This list would be easy to reconstruct by listing keys,", "peaks_cobjs) moldb_cache.save(db_data_cobjs, peaks_cobjs) logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to", "Remove database_ids as it may be in a different order", "optimized. # Not sure if it's worth the effort yet", "self.ds_hash = jsonhash(self.ds_config) self.executor = executor self.storage = executor.storage self.bucket,", "self.config_key = f'{self.prefix}/ds_config.json' self.meta_key = f'{self.prefix}/meta' @contextmanager def lock(self): with", "self.storage.list_keys(self.bucket, self.prefix) if keys: logger.info(f'Clearing centroids cache {self.prefix}') self.storage.delete_objects(self.bucket, keys)", "List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]): def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str, *, storage:", "field 'databases': [(moldb['id'], moldb['targeted']) for moldb in moldbs], } #", "executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb], debug_validate=False, use_cache=True,", "from contextlib import contextmanager, ExitStack from typing import List, Dict", "timeout=3600): yield def load(self): try: db_data_cobjs, peaks_cobjs = deserialize( self.storage.get_object(self.bucket,", "i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)): dest_key = f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data, dest_bucket,", "the effort yet result_cobjs = [] for i, data in", "{ **ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122 # Include the", "that field 'databases': [(moldb['id'], moldb['targeted']) for moldb in moldbs], }", "DbFDRData, ) from sm.engine.annotation_lithops.calculate_centroids import ( calculate_centroids, validate_centroids, ) from", "the cache item is complete, and didn't partially fail to", "self.prefix) if keys: logger.info(f'Clearing centroids cache {self.prefix}') self.storage.delete_objects(self.bucket, keys) def", "sm.engine.annotation.isocalc_wrapper import IsocalcWrapper logger = logging.getLogger('annotation-pipeline') class CentroidsCacheEntry: def __init__(", "import DSConfig from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper logger = logging.getLogger('annotation-pipeline') class", "to the cache dir new_db_data_cobjs, new_peaks_cobjs = self.executor.map( batch_copy, [(db_data_cobjs,", "iter_cobjects_with_prefetch, deserialize, ) from sm.engine.annotation_lithops.utils import jsonhash 
from sm.engine.utils.db_mutex import", "List[InputMolDb], debug_validate=False, use_cache=True, use_db_mutex=True, ): moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config,", "ds_config: DSConfig, moldbs: List[InputMolDb], debug_validate=False, use_cache=True, use_db_mutex=True, ): moldb_cache =", "= cached_val logger.info( f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from", "self.prefix = f\"{raw_prefix}/{self.ds_hash}\" self.config_key = f'{self.prefix}/ds_config.json' self.meta_key = f'{self.prefix}/meta' @contextmanager", "peaks_cobjs = deserialize( self.storage.get_object(self.bucket, self.meta_key) ) return db_data_cobjs, peaks_cobjs except", "the process is helpful to confirm that # the cache", "cached_val: db_data_cobjs, peaks_cobjs = cached_val logger.info( f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)}", "Dict, ds_config: DSConfig, moldbs: List[InputMolDb] ): ds_hash_params = ds_config.copy() self.ds_config", "if debug_validate: validate_centroids(executor, peaks_cobjs) moldb_cache.save(db_data_cobjs, peaks_cobjs) logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)}", "logging.getLogger('annotation-pipeline') class CentroidsCacheEntry: def __init__( self, executor: Executor, sm_storage: Dict,", "ds_config: DSConfig, moldbs: List[InputMolDb] ): ds_hash_params = ds_config.copy() self.ds_config =", "new_peaks_cobjs), self.bucket, self.meta_key) return new_db_data_cobjs, new_peaks_cobjs def clear(self): keys =", "keys = self.storage.list_keys(self.bucket, self.prefix) if keys: logger.info(f'Clearing centroids cache {self.prefix}')", "def clear(self): keys = self.storage.list_keys(self.bucket, self.prefix) if keys: logger.info(f'Clearing centroids", "calculate_centroids, validate_centroids, ) from sm.engine.annotation_lithops.executor import Executor from sm.engine.annotation_lithops.io import", "deserialize( self.storage.get_object(self.bucket, self.meta_key) ) return db_data_cobjs, peaks_cobjs except StorageNoSuchKeyError: return", "def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]): def batch_copy(src_cobjs: List[CloudObject], dest_prefix:", "jsonhash from sm.engine.utils.db_mutex import DBMutex from sm.engine.ds_config import DSConfig from", "Dict, ds_config: DSConfig, moldbs: List[InputMolDb], debug_validate=False, use_cache=True, use_db_mutex=True, ): moldb_cache", "so that a new cache entry is made if #", "= logging.getLogger('annotation-pipeline') class CentroidsCacheEntry: def __init__( self, executor: Executor, sm_storage:", "it may be in a different order to moldbs del", "self.storage = executor.storage self.bucket, raw_prefix = sm_storage['centroids'] self.prefix = f\"{raw_prefix}/{self.ds_hash}\"", "[(moldb['id'], moldb['targeted']) for moldb in moldbs], } # Remove database_ids", "to confirm that # the cache item is complete, and", "def lock(self): with DBMutex().lock(self.ds_hash, timeout=3600): yield def load(self): try: db_data_cobjs,", "( CObj, save_cobj, iter_cobjects_with_prefetch, deserialize, ) from sm.engine.annotation_lithops.utils import jsonhash", "this could be easily optimized. 
# Not sure if it's", "Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb] ): ds_hash_params =", "it's worth the effort yet result_cobjs = [] for i,", "case it's needed for debugging self.storage.put_cloudobject( json.dumps(self.ds_config, indent=4), self.bucket, self.config_key", "databases so that a new cache entry is made if", "class CentroidsCacheEntry: def __init__( self, executor: Executor, sm_storage: Dict, ds_config:", "build_moldb(executor, ds_config, moldbs) isocalc_wrapper = IsocalcWrapper(ds_config) peaks_cobjs = calculate_centroids(executor, formula_cobjs,", "debug_validate=False, use_cache=True, use_db_mutex=True, ): moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs)", "moldb['targeted']) for moldb in moldbs], } # Remove database_ids as", "from lithops.storage.utils import CloudObject, StorageNoSuchKeyError from sm.engine.annotation_lithops.build_moldb import ( build_moldb,", "could be easily optimized. # Not sure if it's worth", "= calculate_centroids(executor, formula_cobjs, isocalc_wrapper) if debug_validate: validate_centroids(executor, peaks_cobjs) moldb_cache.save(db_data_cobjs, peaks_cobjs)", "DSConfig, moldbs: List[InputMolDb] ): ds_hash_params = ds_config.copy() self.ds_config = {", "self.bucket, self.meta_key) return new_db_data_cobjs, new_peaks_cobjs def clear(self): keys = self.storage.list_keys(self.bucket,", "None def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]): def batch_copy(src_cobjs: List[CloudObject],", "# Not sure if it's worth the effort yet result_cobjs", "made if # someone manually changes that field 'databases': [(moldb['id'],", ") return db_data_cobjs, peaks_cobjs except StorageNoSuchKeyError: return None def save(self,", "a different order to moldbs del self.ds_config['database_ids'] self.ds_hash = jsonhash(self.ds_config)", "If Lithops' storage supported Copy Object operations, this could be", "supported Copy Object operations, this could be easily optimized. 
#", "storage: Storage): # If Lithops' storage supported Copy Object operations,", "from sm.engine.annotation_lithops.build_moldb import ( build_moldb, InputMolDb, DbFDRData, ) from sm.engine.annotation_lithops.calculate_centroids", "= f\"{raw_prefix}/{self.ds_hash}\" self.config_key = f'{self.prefix}/ds_config.json' self.meta_key = f'{self.prefix}/meta' @contextmanager def", "self.bucket, raw_prefix = sm_storage['centroids'] self.prefix = f\"{raw_prefix}/{self.ds_hash}\" self.config_key = f'{self.prefix}/ds_config.json'", "IsocalcWrapper logger = logging.getLogger('annotation-pipeline') class CentroidsCacheEntry: def __init__( self, executor:", "for moldb in moldbs], } # Remove database_ids as it", "database_ids as it may be in a different order to", "sm_storage, ds_config, moldbs) with ExitStack() as stack: if use_db_mutex: stack.enter_context(moldb_cache.lock())", "CloudObject, StorageNoSuchKeyError from sm.engine.annotation_lithops.build_moldb import ( build_moldb, InputMolDb, DbFDRData, )", "= moldb_cache.load() else: cached_val = None moldb_cache.clear() if cached_val: db_data_cobjs,", "= self.bucket # Copy cobjs to the cache dir new_db_data_cobjs,", "cache dir new_db_data_cobjs, new_peaks_cobjs = self.executor.map( batch_copy, [(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs,", "from sm.engine.ds_config import DSConfig from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper logger =", "get_moldb_centroids( executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb], debug_validate=False,", "f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')], runtime_memory=1024, ) # Save config in case", "import ( build_moldb, InputMolDb, DbFDRData, ) from sm.engine.annotation_lithops.calculate_centroids import (", "logging from contextlib import contextmanager, ExitStack from typing import List,", "jsonhash(self.ds_config) self.executor = executor self.storage = executor.storage self.bucket, raw_prefix =", "'databases': [(moldb['id'], moldb['targeted']) for moldb in moldbs], } # Remove", "formula_cobjs, isocalc_wrapper) if debug_validate: validate_centroids(executor, peaks_cobjs) moldb_cache.save(db_data_cobjs, peaks_cobjs) logger.info(f'Saved {len(db_data_cobjs)}", "import json import logging from contextlib import contextmanager, ExitStack from", "List[InputMolDb] ): ds_hash_params = ds_config.copy() self.ds_config = { **ds_hash_params, #", "reconstruct by listing keys, but # saving a separate object", "@contextmanager def lock(self): with DBMutex().lock(self.ds_hash, timeout=3600): yield def load(self): try:", "DBMutex().lock(self.ds_hash, timeout=3600): yield def load(self): try: db_data_cobjs, peaks_cobjs = deserialize(", "is made if # someone manually changes that field 'databases':", "f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from cache' ) else:", "someone manually changes that field 'databases': [(moldb['id'], moldb['targeted']) for moldb", "ExitStack from typing import List, Dict import pandas as pd", "from __future__ import annotations import json import logging from contextlib", "= executor self.storage = executor.storage self.bucket, raw_prefix = sm_storage['centroids'] self.prefix", "helpful to confirm that # the cache item is complete,", "new_db_data_cobjs, new_peaks_cobjs def clear(self): keys = self.storage.list_keys(self.bucket, self.prefix) if keys:", "DBMutex from sm.engine.ds_config import DSConfig from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper logger", "moldb_cache.save(db_data_cobjs, 
peaks_cobjs) logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to cache')", "as it may be in a different order to moldbs", "self.config_key ) # Save list of cobjects. This list would", "# Copy cobjs to the cache dir new_db_data_cobjs, new_peaks_cobjs =", "logger.info( f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms from cache' )", "pd from lithops.storage import Storage from lithops.storage.utils import CloudObject, StorageNoSuchKeyError", "f'{self.prefix}/meta' @contextmanager def lock(self): with DBMutex().lock(self.ds_hash, timeout=3600): yield def load(self):", "load(self): try: db_data_cobjs, peaks_cobjs = deserialize( self.storage.get_object(self.bucket, self.meta_key) ) return", "and didn't partially fail to copy. save_cobj(self.storage, (new_db_data_cobjs, new_peaks_cobjs), self.bucket,", "= ds_config.copy() self.ds_config = { **ds_hash_params, # type: ignore #", ") from sm.engine.annotation_lithops.executor import Executor from sm.engine.annotation_lithops.io import ( CObj,", "Storage): # If Lithops' storage supported Copy Object operations, this", "isocalc_wrapper = IsocalcWrapper(ds_config) peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper) if debug_validate:", "{len(peaks_cobjs)} peak segms from cache' ) else: formula_cobjs, db_data_cobjs =", "# type: ignore # https://github.com/python/mypy/issues/4122 # Include the `targeted` value", "= f'{self.prefix}/ds_config.json' self.meta_key = f'{self.prefix}/meta' @contextmanager def lock(self): with DBMutex().lock(self.ds_hash,", "centroids cache {self.prefix}') self.storage.delete_objects(self.bucket, keys) def get_moldb_centroids( executor: Executor, sm_storage:", "List[CloudObject], dest_prefix: str, *, storage: Storage): # If Lithops' storage", "[] for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)): dest_key = f'{dest_prefix}/{i:06}'", "moldb in moldbs], } # Remove database_ids as it may", "f'{self.prefix}/peaks')], runtime_memory=1024, ) # Save config in case it's needed", "dir new_db_data_cobjs, new_peaks_cobjs = self.executor.map( batch_copy, [(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')],", "# saving a separate object as the last step of", "DSConfig, moldbs: List[InputMolDb], debug_validate=False, use_cache=True, use_db_mutex=True, ): moldb_cache = CentroidsCacheEntry(executor,", "import ( calculate_centroids, validate_centroids, ) from sm.engine.annotation_lithops.executor import Executor from", "Not sure if it's worth the effort yet result_cobjs =", "effort yet result_cobjs = [] for i, data in enumerate(iter_cobjects_with_prefetch(storage,", "*, storage: Storage): # If Lithops' storage supported Copy Object", "moldbs del self.ds_config['database_ids'] self.ds_hash = jsonhash(self.ds_config) self.executor = executor self.storage", "a new cache entry is made if # someone manually", "except StorageNoSuchKeyError: return None def save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]):", "= IsocalcWrapper(ds_config) peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper) if debug_validate: validate_centroids(executor,", "self.executor = executor self.storage = executor.storage self.bucket, raw_prefix = sm_storage['centroids']", "save_cobj, iter_cobjects_with_prefetch, deserialize, ) from sm.engine.annotation_lithops.utils import jsonhash from sm.engine.utils.db_mutex", "( calculate_centroids, validate_centroids, ) from 
sm.engine.annotation_lithops.executor import Executor from sm.engine.annotation_lithops.io", "try: db_data_cobjs, peaks_cobjs = deserialize( self.storage.get_object(self.bucket, self.meta_key) ) return db_data_cobjs,", "( build_moldb, InputMolDb, DbFDRData, ) from sm.engine.annotation_lithops.calculate_centroids import ( calculate_centroids,", "db_data_cobjs, peaks_cobjs = cached_val logger.info( f'Loaded {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak", "= { **ds_hash_params, # type: ignore # https://github.com/python/mypy/issues/4122 # Include", "peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper) if debug_validate: validate_centroids(executor, peaks_cobjs) moldb_cache.save(db_data_cobjs,", "yield def load(self): try: db_data_cobjs, peaks_cobjs = deserialize( self.storage.get_object(self.bucket, self.meta_key)", "it's needed for debugging self.storage.put_cloudobject( json.dumps(self.ds_config, indent=4), self.bucket, self.config_key )", "ExitStack() as stack: if use_db_mutex: stack.enter_context(moldb_cache.lock()) if use_cache: cached_val =", "save(self, db_data_cobjs: List[CObj[DbFDRData]], peaks_cobjs: List[CObj[pd.DataFrame]]): def batch_copy(src_cobjs: List[CloudObject], dest_prefix: str,", "batch_copy, [(db_data_cobjs, f'{self.prefix}/db_data'), (peaks_cobjs, f'{self.prefix}/peaks')], runtime_memory=1024, ) # Save config", "self.bucket # Copy cobjs to the cache dir new_db_data_cobjs, new_peaks_cobjs", "= executor.storage self.bucket, raw_prefix = sm_storage['centroids'] self.prefix = f\"{raw_prefix}/{self.ds_hash}\" self.config_key", "dest_key)) return result_cobjs dest_bucket = self.bucket # Copy cobjs to", "peaks_cobjs) logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to cache') return", "value of databases so that a new cache entry is", "yet result_cobjs = [] for i, data in enumerate(iter_cobjects_with_prefetch(storage, src_cobjs)):", "moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs) with ExitStack() as stack:", "that a new cache entry is made if # someone", "cobjs to the cache dir new_db_data_cobjs, new_peaks_cobjs = self.executor.map( batch_copy,", "confirm that # the cache item is complete, and didn't", "in a different order to moldbs del self.ds_config['database_ids'] self.ds_hash =", "f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key)) return result_cobjs dest_bucket = self.bucket #", "def get_moldb_centroids( executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb],", "validate_centroids(executor, peaks_cobjs) moldb_cache.save(db_data_cobjs, peaks_cobjs) logger.info(f'Saved {len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms", "keys: logger.info(f'Clearing centroids cache {self.prefix}') self.storage.delete_objects(self.bucket, keys) def get_moldb_centroids( executor:", "return result_cobjs dest_bucket = self.bucket # Copy cobjs to the", "import annotations import json import logging from contextlib import contextmanager,", "moldbs: List[InputMolDb] ): ds_hash_params = ds_config.copy() self.ds_config = { **ds_hash_params,", "# someone manually changes that field 'databases': [(moldb['id'], moldb['targeted']) for", "= CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs) with ExitStack() as stack: if", "debugging self.storage.put_cloudobject( json.dumps(self.ds_config, indent=4), self.bucket, self.config_key ) # Save list", "): moldb_cache = CentroidsCacheEntry(executor, sm_storage, ds_config, moldbs) with 
ExitStack() as", "Lithops' storage supported Copy Object operations, this could be easily", "{len(db_data_cobjs)} DBs, {len(peaks_cobjs)} peak segms to cache') return db_data_cobjs, peaks_cobjs", "as pd from lithops.storage import Storage from lithops.storage.utils import CloudObject,", "typing import List, Dict import pandas as pd from lithops.storage", "storage supported Copy Object operations, this could be easily optimized.", "src_cobjs)): dest_key = f'{dest_prefix}/{i:06}' result_cobjs.append(storage.put_cloudobject(data, dest_bucket, dest_key)) return result_cobjs dest_bucket", "sm.engine.annotation_lithops.executor import Executor from sm.engine.annotation_lithops.io import ( CObj, save_cobj, iter_cobjects_with_prefetch,", "moldbs) isocalc_wrapper = IsocalcWrapper(ds_config) peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper) if", "IsocalcWrapper(ds_config) peaks_cobjs = calculate_centroids(executor, formula_cobjs, isocalc_wrapper) if debug_validate: validate_centroids(executor, peaks_cobjs)", "import Storage from lithops.storage.utils import CloudObject, StorageNoSuchKeyError from sm.engine.annotation_lithops.build_moldb import", "import IsocalcWrapper logger = logging.getLogger('annotation-pipeline') class CentroidsCacheEntry: def __init__( self,", "a separate object as the last step of the process", "import DBMutex from sm.engine.ds_config import DSConfig from sm.engine.annotation.isocalc_wrapper import IsocalcWrapper", "self, executor: Executor, sm_storage: Dict, ds_config: DSConfig, moldbs: List[InputMolDb] ):", "be in a different order to moldbs del self.ds_config['database_ids'] self.ds_hash" ]
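# ---------------------------------------------------------------------------
# Editor's addition (not part of the module above): a standalone, stdlib-only
# sketch of the "write the manifest last" pattern that CentroidsCacheEntry.save()
# relies on. The real code copies cloud objects via Lithops storage; this
# illustration uses local files purely to show why saving a separate manifest
# as the final step distinguishes a complete cache entry from a partially
# written one. The names write_cache_entry/load_cache_entry are illustrative
# and are not taken from the original codebase.
import json
import tempfile
from pathlib import Path


def write_cache_entry(cache_dir: Path, payloads: dict) -> None:
    cache_dir.mkdir(parents=True, exist_ok=True)
    keys = []
    for name, data in payloads.items():
        (cache_dir / name).write_text(data)
        keys.append(name)
    # The manifest is written only after every payload has been stored, so its
    # presence implies the whole entry is complete.
    (cache_dir / 'meta.json').write_text(json.dumps(keys))


def load_cache_entry(cache_dir: Path):
    meta = cache_dir / 'meta.json'
    if not meta.exists():
        return None  # missing or partially written entry: treat as a cache miss
    keys = json.loads(meta.read_text())
    return {key: (cache_dir / key).read_text() for key in keys}


if __name__ == '__main__':
    with tempfile.TemporaryDirectory() as tmp:
        entry = Path(tmp) / 'example-entry'
        write_cache_entry(entry, {'db_data': 'db payload', 'peaks': 'peaks payload'})
        assert load_cache_entry(entry) == {'db_data': 'db payload', 'peaks': 'peaks payload'}
# ---------------------------------------------------------------------------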
[ "isInPlace(self): \"\"\" Returns whether the connector is connected to a", "the node connected to the bottom of the connection for", "\"\"\" self._connections.append(connection) connection.setStart(self.scenePos()) def updateConnectionPositions(self): \"\"\" Updates the connected connections,", "None: if action == removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index) elif action ==", "if action is not None: if action == removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(),", "to this connector (implementation for parent class) \"\"\" nodes =", "self.getConnectionCount() == 0: disconnectTop.setEnabled(False) removeTop = contextMenu.addAction(\"Remove\") action = contextMenu.exec_(event.screenPos())", "== removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index) elif action == renameTop: self._nodeEditor.tryToRenameTopBlob(self) elif", "for parent class) \"\"\" self._connections.append(connection) connection.setStart(self.scenePos()) def updateConnectionPositions(self): \"\"\" Updates", "connection in self._connections: connectionsBottomConnector = connection.getBottomConnector() if connectionsBottomConnector is not", "start of all connected connections to this connectors position (implementation", "disconnectTop = contextMenu.addAction(\"Disconnect\") if self.getConnectionCount() == 0: disconnectTop.setEnabled(False) removeTop =", "connection.setStart(self.scenePos()) def updateConnectionPositions(self): \"\"\" Updates the connected connections, sets the", "parent class) \"\"\" return True def isInPlace(self): \"\"\" Returns whether", "a top connector (implementation for parent class) \"\"\" return True", "ConnectorItem class ConnectorTopItem(ConnectorItem): \"\"\" Class to provide top connector functionality", "connector is in place. (implementation for parent class) \"\"\" for", "def getConnectedNodes(self): \"\"\" Returns a list of node items, connected", "contextMenu.exec_(event.screenPos()) if action is not None: if action == removeTop:", "bottom connector is in place. (implementation for parent class) \"\"\"", "None: nodes.append(connectionsBottomConnector.getNodeItem()) return nodes def addConnection(self, connection): \"\"\" Adds a", "all connected connections to this connectors position (implementation for parent", "each connection get the node connected to the bottom of", "the connector is connected to a in-place working layer A", "contextMenuEvent(self, event): \"\"\" Context menu for the top connector \"\"\"", "= list() # for each connection get the node connected", "for each connection get the node connected to the bottom", "place. (implementation for parent class) \"\"\" for connection in self._connections:", "connector functionality \"\"\" def __init__(self, index, nodeItem, nodeEditor, parent=None): super(ConnectorTopItem,", "action = contextMenu.exec_(event.screenPos()) if action is not None: if action", "connector (implementation for parent class) \"\"\" nodes = list() #", "Context menu for the top connector \"\"\" contextMenu = QMenu()", "False def getConnectedNodes(self): \"\"\" Returns a list of node items,", "if any connected bottom connector is in place. 
(implementation for", "to this connectors position (implementation for parent class) \"\"\" self._connections.append(connection)", "def updateConnectionPositions(self): \"\"\" Updates the connected connections, sets the start", "\"\"\" Context menu for the top connector \"\"\" contextMenu =", "top connector \"\"\" contextMenu = QMenu() renameTop = contextMenu.addAction(\"Change name\")", "to provide top connector functionality \"\"\" def __init__(self, index, nodeItem,", "removeTop = contextMenu.addAction(\"Remove\") action = contextMenu.exec_(event.screenPos()) if action is not", "class) \"\"\" for connection in self._connections: if connection.getIsInPlace(): return True", "position (implementation for parent class) \"\"\" self._connections.append(connection) connection.setStart(self.scenePos()) def updateConnectionPositions(self):", "menu for the top connector \"\"\" contextMenu = QMenu() renameTop", "(implementation for parent class) \"\"\" nodes = list() # for", "sets the start of the connection to this connectors position", "Returns a list of node items, connected to this connector", "not None: nodes.append(connectionsBottomConnector.getNodeItem()) return nodes def addConnection(self, connection): \"\"\" Adds", "action is not None: if action == removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index)", "disconnectTop.setEnabled(False) removeTop = contextMenu.addAction(\"Remove\") action = contextMenu.exec_(event.screenPos()) if action is", "nodeEditor, parent) def isTopConnector(self): \"\"\" Returns whether the connector is", "gui.main_window.node_editor.items.connector_item import ConnectorItem class ConnectorTopItem(ConnectorItem): \"\"\" Class to provide top", "connections, sets the start of all connected connections to this", "is a top connector (implementation for parent class) \"\"\" return", "import QMenu from gui.main_window.node_editor.items.connector_item import ConnectorItem class ConnectorTopItem(ConnectorItem): \"\"\" Class", "is connected to a in-place working layer A top connector", "= connection.getBottomConnector() if connectionsBottomConnector is not None: nodes.append(connectionsBottomConnector.getNodeItem()) return nodes", "connector (implementation for parent class) \"\"\" return True def isInPlace(self):", "the start of all connected connections to this connectors position", "place if any connected bottom connector is in place. 
(implementation", "(implementation for parent class) \"\"\" for connection in self._connections: if", "a list of node items, connected to this connector (implementation", "connected to the bottom of the connection for connection in", "elif action == renameTop: self._nodeEditor.tryToRenameTopBlob(self) elif action == disconnectTop: self._nodeEditor.disconnectTopBlob(self._nodeItem.getLayerID(),", "\"\"\" Class to provide top connector functionality \"\"\" def __init__(self,", "\"\"\" Returns a list of node items, connected to this", "self).__init__(index, nodeItem, nodeEditor, parent) def isTopConnector(self): \"\"\" Returns whether the", "A top connector is in place if any connected bottom", "contextMenu.addAction(\"Disconnect\") if self.getConnectionCount() == 0: disconnectTop.setEnabled(False) removeTop = contextMenu.addAction(\"Remove\") action", "connection to the connector and sets the start of the", "not None: if action == removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index) elif action", "if connectionsBottomConnector is not None: nodes.append(connectionsBottomConnector.getNodeItem()) return nodes def addConnection(self,", "class) \"\"\" return True def isInPlace(self): \"\"\" Returns whether the", "a connection to the connector and sets the start of", "parent class) \"\"\" for connection in self._connections: if connection.getIsInPlace(): return", "\"\"\" nodes = list() # for each connection get the", "nodeEditor, parent=None): super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent) def isTopConnector(self): \"\"\"", "connection): \"\"\" Adds a connection to the connector and sets", "the bottom of the connection for connection in self._connections: connectionsBottomConnector", "(implementation for parent class) \"\"\" return True def isInPlace(self): \"\"\"", "the connector is a top connector (implementation for parent class)", "from PyQt5.QtWidgets import QMenu from gui.main_window.node_editor.items.connector_item import ConnectorItem class ConnectorTopItem(ConnectorItem):", "connection for connection in self._connections: connectionsBottomConnector = connection.getBottomConnector() if connectionsBottomConnector", "to this connectors position (implementation for parent class) \"\"\" for", "== 0: disconnectTop.setEnabled(False) removeTop = contextMenu.addAction(\"Remove\") action = contextMenu.exec_(event.screenPos()) if", "connection.getIsInPlace(): return True return False def getConnectedNodes(self): \"\"\" Returns a", "QMenu from gui.main_window.node_editor.items.connector_item import ConnectorItem class ConnectorTopItem(ConnectorItem): \"\"\" Class to", "in self._connections: if connection.getIsInPlace(): return True return False def getConnectedNodes(self):", "in self._connections: connection.setStart(self.scenePos()) def contextMenuEvent(self, event): \"\"\" Context menu for", "in self._connections: connectionsBottomConnector = connection.getBottomConnector() if connectionsBottomConnector is not None:", "for the top connector \"\"\" contextMenu = QMenu() renameTop =", "addConnection(self, connection): \"\"\" Adds a connection to the connector and", "connection in self._connections: connection.setStart(self.scenePos()) def contextMenuEvent(self, event): \"\"\" Context menu", "connectionsBottomConnector is not None: nodes.append(connectionsBottomConnector.getNodeItem()) return nodes def addConnection(self, connection):", "connected bottom connector is in place. 
(implementation for parent class)", "\"\"\" for connection in self._connections: if connection.getIsInPlace(): return True return", "the connected connections, sets the start of all connected connections", "this connectors position (implementation for parent class) \"\"\" for connection", "list of node items, connected to this connector (implementation for", "from gui.main_window.node_editor.items.connector_item import ConnectorItem class ConnectorTopItem(ConnectorItem): \"\"\" Class to provide", "import ConnectorItem class ConnectorTopItem(ConnectorItem): \"\"\" Class to provide top connector", "node items, connected to this connector (implementation for parent class)", "nodes def addConnection(self, connection): \"\"\" Adds a connection to the", "parent) def isTopConnector(self): \"\"\" Returns whether the connector is a", "parent class) \"\"\" for connection in self._connections: connection.setStart(self.scenePos()) def contextMenuEvent(self,", "return False def getConnectedNodes(self): \"\"\" Returns a list of node", "if self.getConnectionCount() == 0: disconnectTop.setEnabled(False) removeTop = contextMenu.addAction(\"Remove\") action =", "is not None: if action == removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index) elif", "= contextMenu.exec_(event.screenPos()) if action is not None: if action ==", "super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent) def isTopConnector(self): \"\"\" Returns whether", "return nodes def addConnection(self, connection): \"\"\" Adds a connection to", "= contextMenu.addAction(\"Change name\") disconnectTop = contextMenu.addAction(\"Disconnect\") if self.getConnectionCount() == 0:", "= QMenu() renameTop = contextMenu.addAction(\"Change name\") disconnectTop = contextMenu.addAction(\"Disconnect\") if", "is in place if any connected bottom connector is in", "connectionsBottomConnector = connection.getBottomConnector() if connectionsBottomConnector is not None: nodes.append(connectionsBottomConnector.getNodeItem()) return", "in place if any connected bottom connector is in place.", "connected to this connector (implementation for parent class) \"\"\" nodes", "if connection.getIsInPlace(): return True return False def getConnectedNodes(self): \"\"\" Returns", "provide top connector functionality \"\"\" def __init__(self, index, nodeItem, nodeEditor,", "to the bottom of the connection for connection in self._connections:", "\"\"\" Adds a connection to the connector and sets the", "removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index) elif action == renameTop: self._nodeEditor.tryToRenameTopBlob(self) elif action", "connections to this connectors position (implementation for parent class) \"\"\"", "whether the connector is connected to a in-place working layer", "this connector (implementation for parent class) \"\"\" nodes = list()", "= contextMenu.addAction(\"Disconnect\") if self.getConnectionCount() == 0: disconnectTop.setEnabled(False) removeTop = contextMenu.addAction(\"Remove\")", "connection.getBottomConnector() if connectionsBottomConnector is not None: nodes.append(connectionsBottomConnector.getNodeItem()) return nodes def", "this connectors position (implementation for parent class) \"\"\" self._connections.append(connection) connection.setStart(self.scenePos())", "name\") disconnectTop = contextMenu.addAction(\"Disconnect\") if self.getConnectionCount() == 0: disconnectTop.setEnabled(False) removeTop", "of the connection to this connectors position 
(implementation for parent", "start of the connection to this connectors position (implementation for", "node connected to the bottom of the connection for connection", "action == renameTop: self._nodeEditor.tryToRenameTopBlob(self) elif action == disconnectTop: self._nodeEditor.disconnectTopBlob(self._nodeItem.getLayerID(), self._index)", "\"\"\" Returns whether the connector is a top connector (implementation", "functionality \"\"\" def __init__(self, index, nodeItem, nodeEditor, parent=None): super(ConnectorTopItem, self).__init__(index,", "Returns whether the connector is connected to a in-place working", "the top connector \"\"\" contextMenu = QMenu() renameTop = contextMenu.addAction(\"Change", "action == removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index) elif action == renameTop: self._nodeEditor.tryToRenameTopBlob(self)", "working layer A top connector is in place if any", "for parent class) \"\"\" return True def isInPlace(self): \"\"\" Returns", "index, nodeItem, nodeEditor, parent=None): super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent) def", "layer A top connector is in place if any connected", "Class to provide top connector functionality \"\"\" def __init__(self, index,", "for parent class) \"\"\" for connection in self._connections: if connection.getIsInPlace():", "ConnectorTopItem(ConnectorItem): \"\"\" Class to provide top connector functionality \"\"\" def", "top connector is in place if any connected bottom connector", "parent class) \"\"\" nodes = list() # for each connection", "connector and sets the start of the connection to this", "class) \"\"\" self._connections.append(connection) connection.setStart(self.scenePos()) def updateConnectionPositions(self): \"\"\" Updates the connected", "for parent class) \"\"\" for connection in self._connections: connection.setStart(self.scenePos()) def", "renameTop = contextMenu.addAction(\"Change name\") disconnectTop = contextMenu.addAction(\"Disconnect\") if self.getConnectionCount() ==", "top connector (implementation for parent class) \"\"\" return True def", "(implementation for parent class) \"\"\" for connection in self._connections: connection.setStart(self.scenePos())", "connectors position (implementation for parent class) \"\"\" for connection in", "is in place. 
(implementation for parent class) \"\"\" for connection", "get the node connected to the bottom of the connection", "self._index) elif action == renameTop: self._nodeEditor.tryToRenameTopBlob(self) elif action == disconnectTop:", "nodeItem, nodeEditor, parent=None): super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent) def isTopConnector(self):", "connector is connected to a in-place working layer A top", "nodeItem, nodeEditor, parent) def isTopConnector(self): \"\"\" Returns whether the connector", "for connection in self._connections: connectionsBottomConnector = connection.getBottomConnector() if connectionsBottomConnector is", "def __init__(self, index, nodeItem, nodeEditor, parent=None): super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor,", "(implementation for parent class) \"\"\" self._connections.append(connection) connection.setStart(self.scenePos()) def updateConnectionPositions(self): \"\"\"", "connector is in place if any connected bottom connector is", "self._connections.append(connection) connection.setStart(self.scenePos()) def updateConnectionPositions(self): \"\"\" Updates the connected connections, sets", "\"\"\" Returns whether the connector is connected to a in-place", "PyQt5.QtWidgets import QMenu from gui.main_window.node_editor.items.connector_item import ConnectorItem class ConnectorTopItem(ConnectorItem): \"\"\"", "connector \"\"\" contextMenu = QMenu() renameTop = contextMenu.addAction(\"Change name\") disconnectTop", "\"\"\" def __init__(self, index, nodeItem, nodeEditor, parent=None): super(ConnectorTopItem, self).__init__(index, nodeItem,", "Returns whether the connector is a top connector (implementation for", "connectors position (implementation for parent class) \"\"\" self._connections.append(connection) connection.setStart(self.scenePos()) def", "class) \"\"\" for connection in self._connections: connection.setStart(self.scenePos()) def contextMenuEvent(self, event):", "to the connector and sets the start of the connection", "connected to a in-place working layer A top connector is", "bottom of the connection for connection in self._connections: connectionsBottomConnector =", "of node items, connected to this connector (implementation for parent", "any connected bottom connector is in place. 
(implementation for parent", "list() # for each connection get the node connected to", "= contextMenu.addAction(\"Remove\") action = contextMenu.exec_(event.screenPos()) if action is not None:", "connection to this connectors position (implementation for parent class) \"\"\"", "if action == removeTop: self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index) elif action == renameTop:", "Updates the connected connections, sets the start of all connected", "items, connected to this connector (implementation for parent class) \"\"\"", "\"\"\" return True def isInPlace(self): \"\"\" Returns whether the connector", "top connector functionality \"\"\" def __init__(self, index, nodeItem, nodeEditor, parent=None):", "getConnectedNodes(self): \"\"\" Returns a list of node items, connected to", "parent class) \"\"\" self._connections.append(connection) connection.setStart(self.scenePos()) def updateConnectionPositions(self): \"\"\" Updates the", "QMenu() renameTop = contextMenu.addAction(\"Change name\") disconnectTop = contextMenu.addAction(\"Disconnect\") if self.getConnectionCount()", "self._nodeEditor.tryToRemoveTopBlob(self._nodeItem.getLayerID(), self._index) elif action == renameTop: self._nodeEditor.tryToRenameTopBlob(self) elif action ==", "connected connections, sets the start of all connected connections to", "connector is a top connector (implementation for parent class) \"\"\"", "the connection for connection in self._connections: connectionsBottomConnector = connection.getBottomConnector() if", "sets the start of all connected connections to this connectors", "the connector and sets the start of the connection to", "of all connected connections to this connectors position (implementation for", "return True def isInPlace(self): \"\"\" Returns whether the connector is", "updateConnectionPositions(self): \"\"\" Updates the connected connections, sets the start of", "in place. 
(implementation for parent class) \"\"\" for connection in", "parent=None): super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent) def isTopConnector(self): \"\"\" Returns", "# for each connection get the node connected to the", "contextMenu.addAction(\"Remove\") action = contextMenu.exec_(event.screenPos()) if action is not None: if", "the start of the connection to this connectors position (implementation", "\"\"\" Updates the connected connections, sets the start of all", "position (implementation for parent class) \"\"\" for connection in self._connections:", "__init__(self, index, nodeItem, nodeEditor, parent=None): super(ConnectorTopItem, self).__init__(index, nodeItem, nodeEditor, parent)", "self._connections: connection.setStart(self.scenePos()) def contextMenuEvent(self, event): \"\"\" Context menu for the", "whether the connector is a top connector (implementation for parent", "the connection to this connectors position (implementation for parent class)", "isTopConnector(self): \"\"\" Returns whether the connector is a top connector", "\"\"\" for connection in self._connections: connection.setStart(self.scenePos()) def contextMenuEvent(self, event): \"\"\"", "event): \"\"\" Context menu for the top connector \"\"\" contextMenu", "def addConnection(self, connection): \"\"\" Adds a connection to the connector", "def isTopConnector(self): \"\"\" Returns whether the connector is a top", "of the connection for connection in self._connections: connectionsBottomConnector = connection.getBottomConnector()", "True def isInPlace(self): \"\"\" Returns whether the connector is connected", "0: disconnectTop.setEnabled(False) removeTop = contextMenu.addAction(\"Remove\") action = contextMenu.exec_(event.screenPos()) if action", "for connection in self._connections: connection.setStart(self.scenePos()) def contextMenuEvent(self, event): \"\"\" Context", "connection get the node connected to the bottom of the", "nodes.append(connectionsBottomConnector.getNodeItem()) return nodes def addConnection(self, connection): \"\"\" Adds a connection", "class) \"\"\" nodes = list() # for each connection get", "self._connections: connectionsBottomConnector = connection.getBottomConnector() if connectionsBottomConnector is not None: nodes.append(connectionsBottomConnector.getNodeItem())", "to a in-place working layer A top connector is in", "def isInPlace(self): \"\"\" Returns whether the connector is connected to", "for parent class) \"\"\" nodes = list() # for each", "connected connections to this connectors position (implementation for parent class)", "connection.setStart(self.scenePos()) def contextMenuEvent(self, event): \"\"\" Context menu for the top", "connection in self._connections: if connection.getIsInPlace(): return True return False def", "nodes = list() # for each connection get the node", "a in-place working layer A top connector is in place", "class ConnectorTopItem(ConnectorItem): \"\"\" Class to provide top connector functionality \"\"\"", "Adds a connection to the connector and sets the start", "True return False def getConnectedNodes(self): \"\"\" Returns a list of", "contextMenu.addAction(\"Change name\") disconnectTop = contextMenu.addAction(\"Disconnect\") if self.getConnectionCount() == 0: disconnectTop.setEnabled(False)", "and sets the start of the connection to this connectors", "is not None: nodes.append(connectionsBottomConnector.getNodeItem()) return nodes def addConnection(self, connection): \"\"\"", "in-place working layer A top connector is in place if", 
"def contextMenuEvent(self, event): \"\"\" Context menu for the top connector", "contextMenu = QMenu() renameTop = contextMenu.addAction(\"Change name\") disconnectTop = contextMenu.addAction(\"Disconnect\")", "for connection in self._connections: if connection.getIsInPlace(): return True return False", "self._connections: if connection.getIsInPlace(): return True return False def getConnectedNodes(self): \"\"\"", "return True return False def getConnectedNodes(self): \"\"\" Returns a list", "\"\"\" contextMenu = QMenu() renameTop = contextMenu.addAction(\"Change name\") disconnectTop =" ]
# GridGain Community Edition Licensing
# Copyright 2019 GridGain Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License") modified with Commons Clause
# Restriction; you may not use this file except in compliance with the License. You may obtain a
# copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
# Commons Clause Restriction
#
# The Software is provided to you by the Licensor under the License, as defined below, subject to
# the following condition.
#
# Without limiting other conditions in the License, the grant of rights under the License will not
# include, and the License does not grant to you, the right to Sell the Software.
# For purposes of the foregoing, "Sell" means practicing any or all of the rights granted to you
# under the License to provide to third parties, for a fee or other consideration (including without
# limitation fees for hosting or consulting/ support services related to the Software), a product or
# service whose value derives, entirely or substantially, from the functionality of the Software.
# Any license notice or attribution required by the License must also include this Commons Clause
# License Condition notice.
#
# For purposes of the clause above, the "Licensor" is Copyright 2019 GridGain Systems, Inc.,
# the "License" is the Apache License, Version 2.0, and the Software is the GridGain Community
# Edition software provided with this notice.

from typing import Iterable, Union

from pyignite.queries.op_codes import *
from pyignite.datatypes import (
    Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject,
)
from pyignite.datatypes.key_value import PeekModes
from pyignite.queries import Query, Response
from pyignite.utils import cache_id


def cache_put(
    connection: 'Connection', cache: Union[str, int], key, value,
    key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache (overwriting existing value if any).

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry. Can be of any supported type,
    :param value: value for the key,
    :param key_hint: (optional) Ignite data type, for which the given key should be converted,
    :param value_hint: (optional) Ignite data type, for which the given value should be converted.
    :param binary: (optional) pass True to keep the value in binary form. False by default,
    :param query_id: (optional) a value generated by client and returned as-is in
     response.query_id. When the parameter is omitted, a random value is generated,
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_PUT, [
        ('hash_code', Int), ('flag', Byte),
        ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject),
    ], query_id=query_id)
    return query_struct.perform(connection, {
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'key': key, 'value': value,
    })


def cache_get(
    connection: 'Connection', cache: Union[str, int], key,
    key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Retrieves a value from cache by key.

    :return: API result data object. Contains zero status and a value retrieved
     on success, non-zero status and an error description on failure.
    """
    query_struct = Query(OP_CACHE_GET, [
        ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key': key,
    }, response_config=[('value', AnyDataObject)])
    if result.status != 0:
        return result
    result.value = result.value['value']
    return result


def cache_get_all(
    connection: 'Connection', cache: Union[str, int], keys: Iterable,
    binary=False, query_id=None,
) -> 'APIResult':
    """
    Retrieves multiple key-value pairs from cache.

    :param keys: list of keys or tuples of (key, key_hint),
    :return: API result data object. Contains zero status and a dict, made of
     retrieved key-value pairs, non-zero status and an error description on failure.
    """
    query_struct = Query(OP_CACHE_GET_ALL, [
        ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'keys': keys,
    }, response_config=[('data', Map)])
    if result.status == 0:
        result.value = dict(result.value)['data']
    return result


def cache_put_all(
    connection: 'Connection', cache: Union[str, int], pairs: dict,
    binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts multiple key-value pairs to cache (overwriting existing associations if any).

    :param pairs: dictionary type parameters, contains key-value pairs to save.
     Each key or value can be an item of representable Python type or a tuple
     of (item, hint),
    :return: API result data object. Contains zero status if key-value pairs
     are written, non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_PUT_ALL, [
        ('hash_code', Int), ('flag', Byte), ('data', Map),
    ], query_id=query_id)
    return query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'data': pairs,
    })


def cache_contains_key(
    connection: 'Connection', cache: Union[str, int], key,
    key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Returns a value indicating whether given key is present in cache.

    :return: API result data object. Contains zero status and a bool value
     retrieved on success: `True` when key is present, `False` otherwise,
     non-zero status and an error description on failure.
    """
    query_struct = Query(OP_CACHE_CONTAINS_KEY, [
        ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key': key,
    }, response_config=[('value', Bool)])
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_contains_keys(
    connection: 'Connection', cache: Union[str, int], keys: Iterable,
    binary=False, query_id=None,
) -> 'APIResult':
    """
    Returns a value indicating whether all given keys are present in cache.

    :param keys: a list of keys or (key, type hint) tuples,
    :return: API result data object. Contains zero status and a bool value
     retrieved on success: `True` when all keys are present, `False` otherwise,
     non-zero status and an error description on failure.
    """
    query_struct = Query(OP_CACHE_CONTAINS_KEYS, [
        ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'keys': keys,
    }, response_config=[('value', Bool)])
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_get_and_put(
    connection: 'Connection', cache: Union[str, int], key, value,
    key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache, and returns the previous value for
    that key, or null value if there was not such key.

    :return: API result data object. Contains zero status and an old value or
     None on success, non-zero status and an error description in case of error.
    """
    query_struct = Query(OP_CACHE_GET_AND_PUT, [
        ('hash_code', Int), ('flag', Byte),
        ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'key': key, 'value': value,
    }, response_config=[('value', AnyDataObject)])
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_get_and_replace(
    connection: 'Connection', cache: Union[str, int], key, value,
    key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache, returning previous value for that key,
    if and only if there is a value currently mapped for that key.

    :return: API result data object. Contains zero status and an old value or
     None, non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_GET_AND_REPLACE, [
        ('hash_code', Int), ('flag', Byte),
        ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'key': key, 'value': value,
    }, response_config=[('value', AnyDataObject)])
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_get_and_remove(
    connection: 'Connection', cache: Union[str, int], key,
    key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes the cache entry with specified key, returning the value.

    :return: API result data object. Contains zero status and an old value or
     None, non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_GET_AND_REMOVE, [
        ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key': key,
    }, response_config=[('value', AnyDataObject)])
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_put_if_absent(
    connection: 'Connection', cache: Union[str, int], key, value,
    key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache only if the key does not already exist.

    :return: API result data object. Contains zero status if a value is written,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_PUT_IF_ABSENT, [
        ('hash_code', Int), ('flag', Byte),
        ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'key': key, 'value': value,
    }, response_config=[('success', Bool)])
    if result.status == 0:
        result.value = result.value['success']
    return result


def cache_get_and_put_if_absent(
    connection: 'Connection', cache: Union[str, int], key, value,
    key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache only if the key does not already exist.

    :return: API result data object. Contains zero status and an old value or
     None on success, non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_GET_AND_PUT_IF_ABSENT, [
        ('hash_code', Int), ('flag', Byte),
        ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'key': key, 'value': value,
    }, response_config=[('value', AnyDataObject)])
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_replace(
    connection: 'Connection', cache: Union[str, int], key, value,
    key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache only if the key already exists.

    :return: API result data object. Contains zero status and a boolean success
     code, or non-zero status and an error description if something has gone wrong.
    """
    query_struct = Query(OP_CACHE_REPLACE, [
        ('hash_code', Int), ('flag', Byte),
        ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'key': key, 'value': value,
    }, response_config=[('success', Bool)])
    if result.status == 0:
        result.value = result.value['success']
    return result


def cache_replace_if_equals(
    connection: 'Connection', cache: Union[str, int], key, sample, value,
    key_hint=None, sample_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache only if the key already exists and
    value equals provided sample.

    :param sample: a sample to compare the stored value with,
    :param sample_hint: (optional) Ignite data type, for which the given sample
     should be converted,
    :param value: new value for the given key,
    :return: API result data object. Contains zero status and a boolean success
     code, or non-zero status and an error description if something has gone wrong.
    """
    query_struct = Query(OP_CACHE_REPLACE_IF_EQUALS, [
        ('hash_code', Int), ('flag', Byte),
        ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject),
        ('value', value_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'key': key, 'sample': sample, 'value': value,
    }, response_config=[('success', Bool)])
    if result.status == 0:
        result.value = result.value['success']
    return result


def cache_clear(
    connection: 'Connection', cache: Union[str, int], binary=False, query_id=None,
) -> 'APIResult':
    """
    Clears the cache without notifying listeners or cache writers.

    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_CLEAR, [
        ('hash_code', Int), ('flag', Byte),
    ], query_id=query_id)
    return query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
    })


def cache_clear_key(
    connection: 'Connection', cache: Union[str, int], key,
    key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Clears the cache key without notifying listeners or cache writers.

    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_CLEAR_KEY, [
        ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),
    ], query_id=query_id)
    return query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key': key,
    })


def cache_clear_keys(
    connection: 'Connection', cache: Union[str, int], keys: list,
    binary=False, query_id=None,
) -> 'APIResult':
    """
    Clears the cache keys without notifying listeners or cache writers.

    :param keys: list of keys or tuples of (key, key_hint),
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_CLEAR_KEYS, [
        ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()),
    ], query_id=query_id)
    return query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'keys': keys,
    })


def cache_remove_key(
    connection: 'Connection', cache: Union[str, int], key,
    key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes an entry with a given key, notifying listeners and cache writers.

    :return: API result data object. Contains zero status and a boolean success
     code, or non-zero status and an error description if something has gone wrong.
    """
    query_struct = Query(OP_CACHE_REMOVE_KEY, [
        ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key': key,
    }, response_config=[('success', Bool)])
    if result.status == 0:
        result.value = result.value['success']
    return result


def cache_remove_if_equals(
    connection: 'Connection', cache: Union[str, int], key, sample,
    key_hint=None, sample_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes an entry with a given key if provided value is equal to actual value,
    notifying listeners and cache writers.

    :param sample: a sample to compare the stored value with,
    :return: API result data object. Contains zero status and a boolean success
     code, or non-zero status and an error description if something has gone wrong.
    """
    query_struct = Query(OP_CACHE_REMOVE_IF_EQUALS, [
        ('hash_code', Int), ('flag', Byte),
        ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'key': key, 'sample': sample,
    }, response_config=[('success', Bool)])
    if result.status == 0:
        result.value = result.value['success']
    return result


def cache_remove_keys(
    connection: 'Connection', cache: Union[str, int], keys: Iterable,
    binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes entries with given keys, notifying listeners and cache writers.

    :param keys: list of keys or tuples of (key, key_hint),
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_REMOVE_KEYS, [
        ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()),
    ], query_id=query_id)
    return query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'keys': keys,
    })


def cache_remove_all(
    connection: 'Connection', cache: Union[str, int], binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes all entries from cache, notifying listeners and cache writers.

    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_REMOVE_ALL, [
        ('hash_code', Int), ('flag', Byte),
    ], query_id=query_id)
    return query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
    })


def cache_get_size(
    connection: 'Connection', cache: Union[str, int], peek_modes=0,
    binary=False, query_id=None,
) -> 'APIResult':
    """
    Gets the number of entries in cache.

    :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR),
     primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults
     to all cache partitions (PeekModes.ALL),
    :return: API result data object. Contains zero status and a number of cache
     entries on success, non-zero status and an error description otherwise.
    """
    if not isinstance(peek_modes, (list, tuple)):
        if peek_modes == 0:
            peek_modes = []
        else:
            peek_modes = [peek_modes]
    query_struct = Query(OP_CACHE_GET_SIZE, [
        ('hash_code', Int), ('flag', Byte), ('peek_modes', PeekModes),
    ], query_id=query_id)
    result = query_struct.perform(connection, query_params={
        'hash_code': cache_id(cache), 'flag': 1 if binary else 0,
        'peek_modes': peek_modes,
    }, response_config=[('count', Long)])
    if result.status == 0:
        result.value = result.value['count']
    return result
\"\"\" query_struct = Query( OP_CACHE_PUT, [", "the GridGain Community # Edition software provided with this notice.", "value with a given key to cache (overwriting existing value", "be converted, :param value_hint: (optional) Ignite data type, for which", "cache, :param binary: (optional) pass True to keep the value", "zero status and a dict, made of retrieved key-value pairs,", "or AnyDataObject), ('sample', sample_hint or AnyDataObject), ('value', value_hint or AnyDataObject),", "def cache_remove_if_equals( connection: 'Connection', cache: Union[str, int], key, sample, key_hint=None,", "-> 'APIResult': \"\"\" Returns a value indicating whether given key", "AnyDataObject), ], query_id=query_id, ) result = query_struct.perform( connection, query_params={ 'hash_code':", "Version 2.0 (the \"License\") modified with Commons Clause # Restriction;", "cache_get_all( connection: 'Connection', cache: Union[str, int], keys: Iterable, binary=False, query_id=None,", "cache_clear_keys( connection: 'Connection', cache: Union[str, int], keys: list, binary=False, query_id=None,", "in cache. :param connection: connection to Ignite server, :param cache:", "old value or None if a value is written, non-zero", "of the cache, :param keys: list of keys or tuples", "error description otherwise. \"\"\" query_struct = Query( OP_CACHE_PUT_IF_ABSENT, [ ('hash_code',", "result.value = result.value['value'] return result def cache_get_and_put( connection: 'Connection', cache:", "= result.value['value'] return result def cache_replace( connection: 'Connection', cache: Union[str,", "'APIResult': \"\"\" Returns a value indicating whether given key is", ") if result.status == 0: result.value = result.value['count'] return result", "AnyDataObject), ], ) if result.status != 0: return result result.value", "= Query( OP_CACHE_GET_ALL, [ ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()),", "service whose value derives, entirely or substantially, from the functionality", "if there was not such key. :param connection: connection to", "object. Contains zero status and an old value or None,", "an error description on failure. \"\"\" query_struct = Query( OP_CACHE_CONTAINS_KEYS,", "status on success, non-zero status and an error description otherwise.", "or attribution required by the License must also include this", "cache. :param connection: connection to Ignite server, :param cache: name", "== 0: result.value = result.value['value'] return result def cache_get_and_put( connection:", "“Sell” means practicing any or all of the rights granted", "else 0, 'keys': keys, }, response_config=[ ('data', Map), ], )", "result result.value = result.value['value'] return result def cache_get_all( connection: 'Connection',", "OP_CACHE_CLEAR, [ ('hash_code', Int), ('flag', Byte), ], query_id=query_id, ) return", "int], keys: Iterable, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Removes", "Union[str, int], peek_modes=0, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Gets", "value if any). :param connection: connection to Ignite server, :param", "'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'keys': keys,", "'Connection', cache: Union[str, int], binary=False, query_id=None, ) -> 'APIResult': \"\"\"", "equals provided sample. 
:param connection: connection to Ignite server, :param", "the cache entry, :param key_hint: (optional) Ignite data type, for", "'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key': key,", "When the parameter is omitted, a random value is generated,", "with given keys, notifying listeners and cache writers. :param connection:", "# and limitations under the License. # # Commons Clause", "fees for hosting or consulting/ support services related to the", "third parties, for a fee or other consideration (including without", "value_hint=None, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Puts a value", "related to the Software), a product or # service whose", "a given key to cache only if the key does", "\"\"\" query_struct = Query( OP_CACHE_CLEAR_KEYS, [ ('hash_code', Int), ('flag', Byte),", "from pyignite.datatypes.key_value import PeekModes from pyignite.queries import Query, Response from", "\"\"\" Clears the cache without notifying listeners or cache writers.", "non-zero status and an error description otherwise. \"\"\" if not", "int], binary=False, query_id=None, ) -> 'APIResult': \"\"\" Clears the cache", "cache, and returns the previous value for that key, or", "value: value for the key, :param key_hint: (optional) Ignite data", "parties, for a fee or other consideration (including without #", "a random value is generated, :return: API result data object.", "compare the stored value with, :param key_hint: (optional) Ignite data", "License, Version 2.0 (the \"License\") modified with Commons Clause #", "zero status and an old value or None on success,", "pairs to save. Each key or value can be an", "notice. from typing import Iterable, Union from pyignite.queries.op_codes import *", "bool value retrieved on success: `True` when key is present,", "'Connection', cache: Union[str, int], key, sample, value, key_hint=None, sample_hint=None, value_hint=None,", "which the given value should be converted, :param binary: (optional)", "wrong. \"\"\" query_struct = Query( OP_CACHE_REPLACE, [ ('hash_code', Int), ('flag',", "query_id=None, ) -> 'APIResult': \"\"\" Retrieves multiple key-value pairs from", "Clause Restriction # # The Software is provided to you", ":param cache: name or ID of the cache, :param key:", "def cache_remove_key( connection: 'Connection', cache: Union[str, int], key, key_hint: object=None,", "entry with specified key, returning the value. :param connection: connection", "Ignite data type, for which the given value should be", "to # the following condition. # # Without limiting other", "OP_CACHE_GET_AND_PUT, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),", ") def cache_remove_all( connection: 'Connection', cache: Union[str, int], binary=False, query_id=None,", "Community Edition Licensing # Copyright 2019 GridGain Systems, Inc. #", "-> 'APIResult': \"\"\" Removes an entry with a given key", "Byte), ('keys', AnyDataArray()), ], query_id=query_id, ) return query_struct.perform( connection, query_params={", "query_struct.perform( connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "on failure. 
\"\"\" query_struct = Query( OP_CACHE_CONTAINS_KEY, [ ('hash_code', Int),", "# limitation fees for hosting or consulting/ support services related", "= Query( OP_CACHE_GET, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint", "Query( OP_CACHE_REMOVE_KEYS, [ ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()), ],", "'APIResult': \"\"\" Gets the number of entries in cache. :param", "binary else 0, 'keys': keys, }, response_config=[ ('data', Map), ],", "to actual value, notifying listeners and cache writers. :param connection:", "key for the cache entry, :param sample: a sample to", "existing associations if any). :param connection: connection to Ignite server,", "gone wrong. \"\"\" query_struct = Query( OP_CACHE_REPLACE, [ ('hash_code', Int),", "the License does not grant to you, the right to", "to compare the stored value with, :param value: new value", "connection: connection to Ignite server, :param cache: name or ID", "= result.value['value'] return result def cache_get_all( connection: 'Connection', cache: Union[str,", "key-value pairs, non-zero status and an error description on failure.", "if something has gone wrong. \"\"\" query_struct = Query( OP_CACHE_REPLACE,", "('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject), ('value', value_hint", "a value with a given key to cache, and returns", "int], keys: Iterable, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Returns", "and an error description otherwise. \"\"\" query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT,", "the cache entry. Can be of any supported type, :param", "value with, :param key_hint: (optional) Ignite data type, for which", "Query( OP_CACHE_GET_AND_PUT, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or", "if binary else 0, 'key': key, 'sample': sample, 'value': value,", "AnyDataObject), ('value', value_hint or AnyDataObject), ], query_id=query_id, ) return query_struct.perform(connection,", "generated, :return: API result data object. Contains zero status if", "Bool), ], ) if result.status == 0: result.value = result.value['success']", "int], key, key_hint=None, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Removes", "that key. :param connection: connection to Ignite server, :param cache:", "result data object. Contains zero status and a value retrieved", "0, }, ) def cache_clear_key( connection: 'Connection', cache: Union[str, int],", "the given key should be converted, :param sample_hint: (optional) Ignite", "value should be converted. :param binary: pass True to keep", "when key is present, `False` otherwise, non-zero status and an", "key, }, response_config=[ ('value', AnyDataObject), ], ) if result.status ==", "something has gone wrong. \"\"\" query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS, [", "or non-zero status and an error description if something has", "value, }, response_config=[ ('success', Bool), ], ) if result.status ==", "result.status == 0: result.value = result.value['success'] return result def cache_replace_if_equals(", "Contains zero status and a value retrieved on success, non-zero", "Python type or a tuple of (item, hint), :param binary:", "sample_hint: (optional) Ignite data type, for whic the given sample", "to cache only if the key already exist. :param connection:", "otherwise. 
\"\"\" query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ ('hash_code', Int), ('flag',", "# Unless required by applicable law or agreed to in", "hint), :param binary: (optional) pass True to keep the value", "given sample should be converted :param binary: (optional) pass True", "Systems, Inc. # # Licensed under the Apache License, Version", "value for that key, or null value if there was", "'APIResult': \"\"\" Clears the cache key without notifying listeners or", "of (key, key_hint), :param binary: (optional) pass True to keep", "key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), ('value', value_hint or", "key is present, `False` otherwise, non-zero status and an error", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "Contains zero status and an old value or None, non-zero", "boolean success code, or non-zero status and an error description", "data object. Contains zero status and an old value or", "a sample to compare the stored value with, :param value:", "error description if something has gone wrong. \"\"\" query_struct =", "and the License does not grant to you, the right", "'key': key, 'value': value, }, response_config=[ ('value', AnyDataObject), ], )", "cache_get_size( connection: 'Connection', cache: Union[str, int], peek_modes=0, binary=False, query_id=None, )", "stored value with, :param value: new value for the given", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "connection: 'Connection', cache: Union[str, int], peek_modes=0, binary=False, query_id=None, ) ->", "(item, hint), :param binary: (optional) pass True to keep the", "'key': key, 'value': value, }) def cache_get( connection: 'Connection', cache:", "a value with a given key to cache only if", "'Connection', cache: Union[str, int], keys: Iterable, binary=False, query_id=None, ) ->", "only if the key does not already exist. :param connection:", "result def cache_replace_if_equals( connection: 'Connection', cache: Union[str, int], key, sample,", "binary=False, query_id=None, ) -> 'APIResult': \"\"\" Removes all entries from", "\"\"\" Puts a value with a given key to cache,", "Contains zero status and a bool value retrieved on success:", "on failure. \"\"\" query_struct = Query( OP_CACHE_GET, [ ('hash_code', Int),", "result.status == 0: result.value = result.value['value'] return result def cache_get_and_replace(", "a boolean success code, or non-zero status and an error", "cache: Union[str, int], pairs: dict, binary=False, query_id=None, ) -> 'APIResult':", "cache without notifying listeners or cache writers. :param connection: connection", "entries from cache, notifying listeners and cache writers. :param connection:", "Software is provided to you by the Licensor under the", "the Licensor under the License, as defined below, subject to", "('hash_code', Int), ('flag', Byte), ], query_id=query_id, ) return query_struct.perform( connection,", "backup cache (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), :param", "with specified key, returning the value. :param connection: connection to", "For purposes of the clause above, the “Licensor” is Copyright", "an error description otherwise. \"\"\" query_struct = Query( OP_CACHE_CLEAR, [", "Contains zero status if a value is written, non-zero status", "in case of error. 
\"\"\" query_struct = Query( OP_CACHE_GET_AND_PUT, [", "result.value = result.value['success'] return result def cache_get_and_put_if_absent( connection: 'Connection', cache:", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "1 if binary else 0, 'key': key, }, ) def", "query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'peek_modes':", "], ) if result.status == 0: result.value = result.value['value'] return", "given key to cache only if the key already exists", "otherwise, non-zero status and an error description on failure. \"\"\"", "binary: pass True to keep the value in binary form.", "Any license notice or attribution required by the License must", "'flag': 1 if binary else 0, 'key': key, 'sample': sample,", "language governing permissions # and limitations under the License. #", "key for the cache entry, :param key_hint: (optional) Ignite data", "or # service whose value derives, entirely or substantially, from", "the License to provide to third parties, for a fee", "otherwise. \"\"\" query_struct = Query( OP_CACHE_PUT_ALL, [ ('hash_code', Int), ('flag',", "result def cache_get_and_put( connection: 'Connection', cache: Union[str, int], key, value,", "the License, the grant of rights under the License will", "purposes of the foregoing, “Sell” means practicing any or all", "binary else 0, }, ) def cache_get_size( connection: 'Connection', cache:", "You may obtain a # copy of the License at", "('data', Map), ], query_id=query_id, ) return query_struct.perform( connection, query_params={ 'hash_code':", "of entries in cache. :param connection: connection to Ignite server,", "Long, AnyDataArray, AnyDataObject, ) from pyignite.datatypes.key_value import PeekModes from pyignite.queries", "this file except in compliance with the License. You may", "Puts a value with a given key to cache, and", "the foregoing, “Sell” means practicing any or all of the", "the parameter is omitted, a random value is generated, :return:", "1 if binary else 0, 'keys': keys, }, response_config=[ ('data',", "query_id=query_id, ) result = query_struct.perform( connection, query_params={ 'hash_code': cache_id(cache), 'flag':", "connection: 'Connection', cache: Union[str, int], key, key_hint: object=None, binary=False, query_id=None,", "0, }, ) def cache_get_size( connection: 'Connection', cache: Union[str, int],", "this Commons Clause # License Condition notice. # # For", "== 0: result.value = result.value['value'] return result def cache_get_and_remove( connection:", "success code, or non-zero status and an error description if", "compare the stored value with, :param value: new value for", "this notice. from typing import Iterable, Union from pyignite.queries.op_codes import", "supported type, :param value: value for the key, :param key_hint:", "the given value should be converted, :param binary: (optional) pass", "Union[str, int], keys: list, binary=False, query_id=None, ) -> 'APIResult': \"\"\"", "object. Contains zero status if key-value pairs are written, non-zero", "file except in compliance with the License. 
You may obtain", "are present, `False` otherwise, non-zero status and an error description", "the cache, :param peek_modes: (optional) limit count to near cache", "= Query( OP_CACHE_CONTAINS_KEY, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint", "binary else 0, 'peek_modes': peek_modes, }, response_config=[ ('count', Long), ],", "else 0, 'peek_modes': peek_modes, }, response_config=[ ('count', Long), ], )", "('flag', Byte), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject),", "value retrieved on success: `True` when all keys are present,", "value with a given key to cache only if the", "'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'peek_modes': peek_modes,", "# The Software is provided to you by the Licensor", "AnyDataObject), ('sample', sample_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ],", "the value in binary form. False by default, :param query_id:", "data object. Contains zero status and a value retrieved on", "value equals provided sample. :param connection: connection to Ignite server,", "of the clause above, the “Licensor” is Copyright 2019 GridGain", "}, response_config=[ ('value', AnyDataObject), ], ) if result.status != 0:", "non-zero status and an error description in case of error.", "query_id=None, ) -> 'APIResult': \"\"\" Clears the cache without notifying", "cache_clear_key( connection: 'Connection', cache: Union[str, int], key, key_hint: object=None, binary=False,", "-> 'APIResult': \"\"\" Clears the cache keys without notifying listeners", "under the # License is distributed on an \"AS IS\"", "must also include this Commons Clause # License Condition notice.", "with this notice. from typing import Iterable, Union from pyignite.queries.op_codes", "the following condition. # # Without limiting other conditions in", "substantially, from the functionality of the Software. # Any license", "Ignite server, :param cache: name or ID of the cache,", "foregoing, “Sell” means practicing any or all of the rights", "and an error description otherwise. \"\"\" query_struct = Query( OP_CACHE_GET_AND_REMOVE,", "old value or None, non-zero status and an error description", "be converted, :param binary: (optional) pass True to keep the", "wrong. \"\"\" query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ ('hash_code', Int), ('flag',", "which the given key should be converted, :param sample_hint: (optional)", "= [peek_modes] query_struct = Query( OP_CACHE_GET_SIZE, [ ('hash_code', Int), ('flag',", "description otherwise. \"\"\" query_struct = Query( OP_CACHE_REMOVE_ALL, [ ('hash_code', Int),", "converted :param value_hint: (optional) Ignite data type, for which the", "`False` otherwise, non-zero status and an error description on failure.", "Retrieves a value from cache by key. :param connection: connection", "cache_id(cache), 'flag': 1 if binary else 0, 'key': key, 'value':", "int], pairs: dict, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Puts", "with a given key if provided value is equal to", "binary=False, query_id=None, ) -> 'APIResult': \"\"\" Gets the number of", "or ID of the cache, :param keys: list of keys", "provided value is equal to actual value, notifying listeners and", "with Commons Clause # Restriction; you may not use this", "keys: list, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Clears the", "result.status == 0: result.value = result.value['success'] return result def cache_get_and_put_if_absent(", "binary form. 
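
# A minimal put/get usage sketch (illustrative only, kept as a comment so the
# module stays side-effect free on import): `conn` is an assumption here,
# standing for an established pyignite Connection to a running Ignite node.
#
#   result = cache_put(conn, 'my_cache', 'key', 42)
#   assert result.status == 0
#   result = cache_get(conn, 'my_cache', 'key')
#   print(result.value)  # -> 42
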
def cache_get_all(
        connection: 'Connection', cache: Union[str, int], keys: Iterable,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Retrieves multiple key-value pairs from cache.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param keys: list of keys or tuples of (key, key_hint),
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a dict, made
     of retrieved key-value pairs, non-zero status and an error description
     on failure.
    """
    query_struct = Query(
        OP_CACHE_GET_ALL, [
            ('hash_code', Int),
            ('flag', Byte),
            ('keys', AnyDataArray()),
        ], query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'keys': keys,
        },
        response_config=[('data', Map)],
    )
    if result.status == 0:
        result.value = dict(result.value)['data']
    return result


def cache_put_all(
        connection: 'Connection', cache: Union[str, int], pairs: dict,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts multiple key-value pairs to cache (overwriting existing associations
    if any).

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param pairs: dictionary type parameters, contains key-value pairs to
     save. Each key or value can be an item of representable Python type or
     a tuple of (item, hint),
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status if key-value pairs
     are written, non-zero status and an error description otherwise.
    """
    query_struct = Query(
        OP_CACHE_PUT_ALL, [
            ('hash_code', Int),
            ('flag', Byte),
            ('data', Map),
        ], query_id=query_id,
    )
    return query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'data': pairs,
        },
    )
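
# Sketch of the `pairs` format accepted by cache_put_all (illustrative and
# commented out; `conn` and the IntObject hint are assumptions, not
# requirements): values may be given bare or as (item, hint) tuples when a
# specific Ignite type is needed.
#
#   from pyignite.datatypes import IntObject
#   cache_put_all(conn, 'my_cache', {
#       'plain': 1,               # type hint inferred from the Python value
#       'typed': (2, IntObject),  # explicit (item, hint) tuple
#   })
#   cache_get_all(conn, 'my_cache', ['plain', 'typed']).value
#   # -> {'plain': 1, 'typed': 2}
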
def cache_contains_key(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Returns a value indicating whether given key is present in cache.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry. Can be of any supported type,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param binary: pass True to keep the value in binary form. False
     by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a bool value
     retrieved on success: `True` when key is present, `False` otherwise,
     non-zero status and an error description on failure.
    """
    query_struct = Query(
        OP_CACHE_CONTAINS_KEY, [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ], query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
        },
        response_config=[('value', Bool)],
    )
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_contains_keys(
        connection: 'Connection', cache: Union[str, int], keys: Iterable,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Returns a value indicating whether all given keys are present in cache.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param keys: a list of keys or (key, type hint) tuples,
    :param binary: pass True to keep the value in binary form. False
     by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a bool value
     retrieved on success: `True` when all keys are present, `False`
     otherwise, non-zero status and an error description on failure.
    """
    query_struct = Query(
        OP_CACHE_CONTAINS_KEYS, [
            ('hash_code', Int),
            ('flag', Byte),
            ('keys', AnyDataArray()),
        ], query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'keys': keys,
        },
        response_config=[('value', Bool)],
    )
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_get_and_put(
        connection: 'Connection', cache: Union[str, int], key, value,
        key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache, and returns the previous value
    for that key, or None if there was no such key.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry. Can be of any supported type,
    :param value: value for the key,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param value_hint: (optional) Ignite data type, for which the given value
     should be converted,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and an old value
     or None on success, non-zero status and an error description in case
     of error.
    """
    query_struct = Query(
        OP_CACHE_GET_AND_PUT, [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ], query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
            'value': value,
        },
        response_config=[('value', AnyDataObject)],
    )
    if result.status == 0:
        result.value = result.value['value']
    return result
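
# Behaviour sketch for cache_get_and_put (illustrative; `conn` is an assumed
# Connection): the call always writes, and the returned value is whatever the
# key held before the write.
#
#   first = cache_get_and_put(conn, 'my_cache', 'counter', 1)
#   # first.value is None -- the key did not exist yet
#   second = cache_get_and_put(conn, 'my_cache', 'counter', 2)
#   # second.value == 1 -- the previous value is reported back
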
def cache_get_and_remove(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes the cache entry with specified key, returning the value.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry. Can be of any supported type,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and an old value
     or None, non-zero status and an error description otherwise.
    """
    query_struct = Query(
        OP_CACHE_GET_AND_REMOVE, [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ], query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
        },
        response_config=[('value', AnyDataObject)],
    )
    if result.status == 0:
        result.value = result.value['value']
    return result


def cache_get_and_put_if_absent(
        connection: 'Connection', cache: Union[str, int], key, value,
        key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache only if the key does not
    already exist.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry. Can be of any supported type,
    :param value: value for the key,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param value_hint: (optional) Ignite data type, for which the given value
     should be converted,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and an old value
     or None if a value is written, non-zero status and an error description
     otherwise.
    """
    query_struct = Query(
        OP_CACHE_GET_AND_PUT_IF_ABSENT, [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ], query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
            'value': value,
        },
        response_config=[('value', AnyDataObject)],
    )
    if result.status == 0:
        result.value = result.value['value']
    return result
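
# Contrast sketch (illustrative; `conn` is an assumed Connection): unlike
# cache_get_and_put, the *_if_absent variant leaves an existing entry intact
# and reports the value that is already there.
#
#   r1 = cache_get_and_put_if_absent(conn, 'my_cache', 'k', 'new')
#   # r1.value is None -> 'new' was written
#   r2 = cache_get_and_put_if_absent(conn, 'my_cache', 'k', 'other')
#   # r2.value == 'new' -> existing value kept, 'other' discarded
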
def cache_remove_key(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint: object = None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes an entry with a given key, notifying listeners and cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry. Can be of any supported type,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a boolean
     success code, or non-zero status and an error description if something
     has gone wrong.
    """
    query_struct = Query(
        OP_CACHE_REMOVE_KEY, [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ], query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
        },
        response_config=[('success', Bool)],
    )
    if result.status == 0:
        result.value = result.value['success']
    return result


def cache_remove_all(
        connection: 'Connection', cache: Union[str, int], binary=False,
        query_id=None,
) -> 'APIResult':
    """
    Removes all entries from cache, notifying listeners and cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    query_struct = Query(
        OP_CACHE_REMOVE_ALL, [
            ('hash_code', Int),
            ('flag', Byte),
        ], query_id=query_id,
    )
    return query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
        },
    )
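
# Removal sketch (illustrative; `conn` assumed): cache_remove_key reports
# whether an entry was actually deleted via the boolean in result.value,
# while cache_remove_all only signals success or failure through the status.
#
#   removed = cache_remove_key(conn, 'my_cache', 'k')
#   if removed.status == 0 and not removed.value:
#       pass  # nothing was stored under 'k' in the first place
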
:param connection: connection to Ignite", "\"\"\" query_struct = Query( OP_CACHE_GET_AND_PUT, [ ('hash_code', Int), ('flag', Byte),", "keys, }, ) def cache_remove_key( connection: 'Connection', cache: Union[str, int],", "# service whose value derives, entirely or substantially, from the", "= Query( OP_CACHE_REMOVE_IF_EQUALS, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint", "value: new value for the given key, :param key_hint: (optional)", "OP_CACHE_PUT_IF_ABSENT, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),", "peek_modes = [peek_modes] query_struct = Query( OP_CACHE_GET_SIZE, [ ('hash_code', Int),", "be converted, :param binary: pass True to keep the value", "and an error description otherwise. \"\"\" query_struct = Query( OP_CACHE_CLEAR_KEYS,", "query_struct = Query( OP_CACHE_CLEAR_KEYS, [ ('hash_code', Int), ('flag', Byte), ('keys',", "query_struct = Query( OP_CACHE_GET_ALL, [ ('hash_code', Int), ('flag', Byte), ('keys',", "The Software is provided to you by the Licensor under", "will not # include, and the License does not grant", "'flag': 1 if binary else 0, }, ) def cache_get_size(", "cache_contains_keys( connection: 'Connection', cache: Union[str, int], keys: Iterable, binary=False, query_id=None,", "a # copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "random value is generated, :return: API result data object. Contains", "query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ ('hash_code', Int), ('flag', Byte), ('key',", ":param cache: name or ID of the cache, :param peek_modes:", "query_struct = Query( OP_CACHE_PUT, [ ('hash_code', Int), ('flag', Byte), ('key',", "key-value pairs to save. Each key or value can be", "return result def cache_get_and_remove( connection: 'Connection', cache: Union[str, int], key,", "the clause above, the “Licensor” is Copyright 2019 GridGain Systems,", "connection: 'Connection', cache: Union[str, int], key, sample, key_hint=None, sample_hint=None, binary=False,", "('success', Bool), ], ) if result.status == 0: result.value =", "for hosting or consulting/ support services related to the Software),", "peek_modes = [] else: peek_modes = [peek_modes] query_struct = Query(", "result.value = result.value['value'] return result def cache_replace( connection: 'Connection', cache:", "pairs, }, ) def cache_contains_key( connection: 'Connection', cache: Union[str, int],", "Union[str, int], keys: Iterable, binary=False, query_id=None, ) -> 'APIResult': \"\"\"", "], ) if result.status == 0: result.value = result.value['success'] return", "result def cache_put_all( connection: 'Connection', cache: Union[str, int], pairs: dict,", "binary=False, query_id=None, ) -> 'APIResult': \"\"\" Clears the cache key", "specified key, returning the value. :param connection: connection to Ignite", "cache, :param pairs: dictionary type parameters, contains key-value pairs to", "converted. :param binary: (optional) pass True to keep the value", "dict(result.value)['data'] return result def cache_put_all( connection: 'Connection', cache: Union[str, int],", "key: key for the cache entry, :param sample: a sample", "or agreed to in writing, software distributed under the #", "(optional) a value generated by client and returned as-is in", "'key': key, }, response_config=[ ('success', Bool), ], ) if result.status", "gone wrong. 
\"\"\" query_struct = Query( OP_CACHE_REMOVE_KEY, [ ('hash_code', Int),", "Removes all entries from cache, notifying listeners and cache writers.", "}) def cache_get( connection: 'Connection', cache: Union[str, int], key, key_hint=None,", "value can be an item of representable Python type or", "result def cache_put_if_absent( connection: 'Connection', cache: Union[str, int], key, value,", "= Query( OP_CACHE_GET_AND_PUT, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint", "is present in cache. :param connection: connection to Ignite server,", "('value', value_hint or AnyDataObject), ], query_id=query_id, ) result = query_struct.perform(", "sample_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], query_id=query_id, )", "error description otherwise. \"\"\" query_struct = Query( OP_CACHE_REMOVE_KEYS, [ ('hash_code',", "default, :param query_id: (optional) a value generated by client and", "result def cache_clear( connection: 'Connection', cache: Union[str, int], binary=False, query_id=None,", "OP_CACHE_CONTAINS_KEY, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),", ":param cache: name or ID of the cache, :param keys:", "is written, non-zero status and an error description otherwise. \"\"\"", "= Query( OP_CACHE_REPLACE_IF_EQUALS, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint", "if key-value pairs are written, non-zero status and an error", ":param cache: name or ID of the cache, :param binary:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "and an error description otherwise. \"\"\" query_struct = Query( OP_CACHE_GET_AND_REPLACE,", "as-is in response.query_id. When the parameter is omitted, a random", "associations if any). :param connection: connection to Ignite server, :param", "data type, for which the given value should be converted.", "(PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), :param binary: (optional)", "key, 'value': value, }) def cache_get( connection: 'Connection', cache: Union[str,", "status and a number of cache entries on success, non-zero", "given key if provided value is equal to actual value,", "pairs to cache (overwriting existing associations if any). :param connection:", "query_struct = Query( OP_CACHE_GET, [ ('hash_code', Int), ('flag', Byte), ('key',", "in compliance with the License. You may obtain a #", ") -> 'APIResult': \"\"\" Removes an entry with a given", "converted, :param sample_hint: (optional) Ignite data type, for whic the", "given value should be converted, :param binary: (optional) pass True", "AnyDataObject), ], query_id=query_id, ) return query_struct.perform(connection, { 'hash_code': cache_id(cache), 'flag':", "description otherwise. \"\"\" query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ ('hash_code', Int),", "cache: name or ID of the cache, :param pairs: dictionary", "the given sample should be converted :param value_hint: (optional) Ignite", "\"\"\" query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ ('hash_code', Int), ('flag', Byte),", "== 0: result.value = result.value['success'] return result def cache_remove_keys( connection:", "\"\"\" query_struct = Query( OP_CACHE_PUT_IF_ABSENT, [ ('hash_code', Int), ('flag', Byte),", "entries in cache. :param connection: connection to Ignite server, :param", "subject to # the following condition. # # Without limiting", "for the cache entry, :param key_hint: (optional) Ignite data type,", "otherwise. 
\"\"\" if not isinstance(peek_modes, (list, tuple)): if peek_modes ==", "result = query_struct.perform( connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if", "zero status if a value is written, non-zero status and", "= Query( OP_CACHE_CLEAR_KEYS, [ ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()),", "notifying listeners or cache writers. :param connection: connection to Ignite", "value_hint or AnyDataObject), ], query_id=query_id, ) return query_struct.perform(connection, { 'hash_code':", "(PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults to", "('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()), ], query_id=query_id, ) result", "can be an item of representable Python type or a", "response_config=[ ('count', Long), ], ) if result.status == 0: result.value", "def cache_replace_if_equals( connection: 'Connection', cache: Union[str, int], key, sample, value,", "a number of cache entries on success, non-zero status and", "'APIResult': \"\"\" Puts multiple key-value pairs to cache (overwriting existing", "key should be converted, :param binary: (optional) pass True to", "limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY),", "Clears the cache key without notifying listeners or cache writers.", "== 0: result.value = dict(result.value)['data'] return result def cache_put_all( connection:", "OP_CACHE_CLEAR_KEYS, [ ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()), ], query_id=query_id,", "status if a value is written, non-zero status and an", "('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject), ], query_id=query_id,", "does not already exist. :param connection: connection to Ignite server,", "key, key_hint: object=None, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Clears", "data object. Contains zero status if key-value pairs are written,", "'keys': keys, }, ) def cache_remove_all( connection: 'Connection', cache: Union[str,", "not isinstance(peek_modes, (list, tuple)): if peek_modes == 0: peek_modes =", "modified with Commons Clause # Restriction; you may not use", "which the given key should be converted, :param binary: (optional)", "is written, non-zero status and an error description in case", ":return: API result data object. Contains zero status if a", "a value from cache by key. :param connection: connection to", "query_struct = Query( OP_CACHE_REMOVE_KEYS, [ ('hash_code', Int), ('flag', Byte), ('keys',", "if the key already exists and value equals provided sample.", "the right to Sell the Software. # For purposes of", "OP_CACHE_GET, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),", "following condition. # # Without limiting other conditions in the", "of the cache, :param key: key for the cache entry.", "(optional) Ignite data type, for whic the given sample should", "if the key already exist. :param connection: connection to Ignite", "list of keys or (key, type hint) tuples, :param binary:", "result.status == 0: result.value = result.value['value'] return result def cache_get_and_remove(", ") -> 'APIResult': \"\"\" Clears the cache key without notifying", "query_id: (optional) a value generated by client and returned as-is", "# GridGain Community Edition Licensing # Copyright 2019 GridGain Systems,", "omitted, a random value is generated, :return: API result data", "object. 
Contains zero status and a bool value retrieved on", "the cache, :param keys: a list of keys or (key,", "Iterable, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Removes entries with", "result.status == 0: result.value = result.value['success'] return result def cache_remove_keys(", "result.value['value'] return result def cache_get_and_put( connection: 'Connection', cache: Union[str, int],", "a given key to cache, returning previous value for that", "given key to cache, and returns the previous value for", "returned as-is in response.query_id. When the parameter is omitted, a", "def cache_get_all( connection: 'Connection', cache: Union[str, int], keys: Iterable, binary=False,", "key, if and only if there is a value currently", "and an error description if something has gone wrong. \"\"\"", "OP_CACHE_GET_AND_REMOVE, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),", "Query( OP_CACHE_GET_AND_REPLACE, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or", "Query( OP_CACHE_REPLACE_IF_EQUALS, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or", "def cache_replace( connection: 'Connection', cache: Union[str, int], key, value, key_hint=None,", "'APIResult': \"\"\" Removes entries with given keys, notifying listeners and", "result def cache_get_and_replace( connection: 'Connection', cache: Union[str, int], key, value,", "}, response_config=[ ('success', Bool), ], ) if result.status == 0:", "cache_remove_all( connection: 'Connection', cache: Union[str, int], binary=False, query_id=None, ) ->", ":param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR),", "under the License will not # include, and the License", "cache by key. :param connection: connection to Ignite server, :param", "value retrieved on success: `True` when key is present, `False`", "failure. \"\"\" query_struct = Query( OP_CACHE_CONTAINS_KEYS, [ ('hash_code', Int), ('flag',", "object. Contains zero status and an old value or None", "or ID of the cache, :param binary: (optional) pass True", "or None if a value is written, non-zero status and", "description otherwise. \"\"\" query_struct = Query( OP_CACHE_CLEAR_KEYS, [ ('hash_code', Int),", "result.value['value'] return result def cache_replace( connection: 'Connection', cache: Union[str, int],", "if something has gone wrong. \"\"\" query_struct = Query( OP_CACHE_REMOVE_KEY,", "gone wrong. \"\"\" query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ ('hash_code', Int),", "an error description otherwise. \"\"\" if not isinstance(peek_modes, (list, tuple)):", "\"\"\" query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ ('hash_code', Int), ('flag', Byte),", "whic the given sample should be converted :param value_hint: (optional)", "0, 'keys': keys, }, response_config=[ ('data', Map), ], ) if", "with a given key to cache, returning previous value for", "else 0, 'key': key, }, ) def cache_clear_keys( connection: 'Connection',", "server, :param cache: name or ID of the cache, :param", "else 0, 'key': key, }, response_config=[ ('value', AnyDataObject), ], )", "-> 'APIResult': \"\"\" Retrieves a value from cache by key.", "Query( OP_CACHE_CONTAINS_KEYS, [ ('hash_code', Int), ('flag', Byte), ('keys', AnyDataArray()), ],", "AnyDataObject), ('value', value_hint or AnyDataObject), ], query_id=query_id, ) result =", "-> 'APIResult': \"\"\" Puts multiple key-value pairs to cache (overwriting", "else 0, 'data': pairs, }, ) def cache_contains_key( connection: 'Connection',", "for the cache entry. 
Can be of any supported type,", "query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ ('hash_code', Int), ('flag', Byte), ('key',", "object. Contains zero status and a number of cache entries", "converted, :param value_hint: (optional) Ignite data type, for which the", "only if there is a value currently mapped for that", "zero status and a number of cache entries on success,", "retrieved on success, non-zero status and an error description on", "cache_get_and_replace( connection: 'Connection', cache: Union[str, int], key, value, key_hint=None, value_hint=None,", "of representable Python type or a tuple of (item, hint),", "\"\"\" Removes the cache entry with specified key, returning the", "int], binary=False, query_id=None, ) -> 'APIResult': \"\"\" Removes all entries", "binary else 0, 'key': key, 'sample': sample, }, response_config=[ ('success',", "cache_put( connection: 'Connection', cache: Union[str, int], key, value, key_hint=None, value_hint=None,", "OR CONDITIONS OF ANY # KIND, either express or implied.", "key should be converted, :param binary: pass True to keep", "value is written, non-zero status and an error description otherwise.", "binary=False, query_id=None, ) -> 'APIResult': \"\"\" Retrieves a value from", "(overwriting existing associations if any). :param connection: connection to Ignite", "('count', Long), ], ) if result.status == 0: result.value =", "and cache writers. :param connection: connection to Ignite server, :param", "grant of rights under the License will not # include,", "('value', AnyDataObject), ], ) if result.status != 0: return result", "0, 'key': key, 'value': value, }, response_config=[ ('value', AnyDataObject), ],", "or AnyDataObject), ('sample', sample_hint or AnyDataObject), ], query_id=query_id, ) result", "API result data object. Contains zero status and a dict,", "such key. :param connection: connection to Ignite server, :param cache:", "rights granted to you # under the License to provide", "API result data object. Contains zero status if key-value pairs", "limiting other conditions in the License, the grant of rights", "1 if binary else 0, 'data': pairs, }, ) def", "purposes of the clause above, the “Licensor” is Copyright 2019", "non-zero status and an error description if something has gone", "data object. Contains zero status and a number of cache", "return result def cache_remove_if_equals( connection: 'Connection', cache: Union[str, int], key,", "\"\"\" query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS, [ ('hash_code', Int), ('flag', Byte),", "# Commons Clause Restriction # # The Software is provided", "rights under the License will not # include, and the", "whether given key is present in cache. :param connection: connection", "entries with given keys, notifying listeners and cache writers. :param", "type, for which the given key should be converted, :param", "Clause # License Condition notice. # # For purposes of", "description otherwise. \"\"\" if not isinstance(peek_modes, (list, tuple)): if peek_modes", "success, non-zero status and an error description otherwise. \"\"\" query_struct", "for the given key, :param key_hint: (optional) Ignite data type,", "value is written, non-zero status and an error description in", "keys are present, `False` otherwise, non-zero status and an error", "description otherwise. \"\"\" query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ ('hash_code', Int),", "something has gone wrong. \"\"\" query_struct = Query( OP_CACHE_REPLACE, [", "key is present in cache. 
:param connection: connection to Ignite", "otherwise. \"\"\" query_struct = Query( OP_CACHE_GET_AND_REPLACE, [ ('hash_code', Int), ('flag',", "if binary else 0, 'key': key, }, response_config=[ ('value', Bool),", ") from pyignite.datatypes.key_value import PeekModes from pyignite.queries import Query, Response", "result.status == 0: result.value = result.value['value'] return result def cache_contains_keys(", "None, non-zero status and an error description otherwise. \"\"\" query_struct", "code, or non-zero status and an error description if something", "or ID of the cache, :param pairs: dictionary type parameters,", "query_id=None, ) -> 'APIResult': \"\"\" Removes the cache entry with", "1 if binary else 0, 'key': key, 'value': value, },", "already exists and value equals provided sample. :param connection: connection", "status and an error description otherwise. \"\"\" if not isinstance(peek_modes,", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "failure. \"\"\" query_struct = Query( OP_CACHE_GET_ALL, [ ('hash_code', Int), ('flag',", "other conditions in the License, the grant of rights under", "if binary else 0, 'key': key, 'value': value, }) def", "limitation fees for hosting or consulting/ support services related to", "('sample', sample_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], query_id=query_id,", "derives, entirely or substantially, from the functionality of the Software.", "key, key_hint=None, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Removes the", "result def cache_replace( connection: 'Connection', cache: Union[str, int], key, value,", "query_id=None, ) -> 'APIResult': \"\"\" Puts a value with a", "response_config=[ ('value', AnyDataObject), ], ) if result.status == 0: result.value", "of the cache, :param pairs: dictionary type parameters, contains key-value", "of keys or (key, type hint) tuples, :param binary: pass", "cache, :param key: key for the cache entry. Can be", "key, }, response_config=[ ('value', AnyDataObject), ], ) if result.status !=", "\"\"\" Gets the number of entries in cache. :param connection:", "an error description otherwise. \"\"\" query_struct = Query( OP_CACHE_GET_AND_REMOVE, [", "Int), ('flag', Byte), ('keys', AnyDataArray()), ], query_id=query_id, ) return query_struct.perform(", "(PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults to all cache partitions", "CONDITIONS OF ANY # KIND, either express or implied. 
See", "query_id=None, ) -> 'APIResult': \"\"\" Returns a value indicating whether", "if result.status == 0: result.value = dict(result.value)['data'] return result def", "('hash_code', Int), ('flag', Byte), ('data', Map), ], query_id=query_id, ) return", "ID of the cache, :param key: key for the cache", "peek_modes, }, response_config=[ ('count', Long), ], ) if result.status ==", "number of cache entries on success, non-zero status and an", "# # Commons Clause Restriction # # The Software is", "response_config=[ ('value', AnyDataObject), ], ) if result.status != 0: return", "keys: Iterable, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Returns a", ") -> 'APIResult': \"\"\" Retrieves a value from cache by", "support services related to the Software), a product or #", "Restriction # # The Software is provided to you by", "('value', AnyDataObject), ], ) if result.status == 0: result.value =", "key, }, ) def cache_clear_keys( connection: 'Connection', cache: Union[str, int],", "means practicing any or all of the rights granted to", "(optional) pass True to keep the value in binary form.", "cache only if the key already exist. :param connection: connection", "if binary else 0, 'keys': keys, }, response_config=[ ('value', Bool),", "key to cache (overwriting existing value if any). :param connection:", "given key to cache only if the key does not", "if not isinstance(peek_modes, (list, tuple)): if peek_modes == 0: peek_modes", "== 0: result.value = result.value['value'] return result def cache_put_if_absent( connection:", "parameter is omitted, a random value is generated, :return: API", ":return: API result data object. Contains zero status on success,", "# the following condition. # # Without limiting other conditions", "API result data object. Contains zero status and a value", "\"\"\" query_struct = Query( OP_CACHE_GET, [ ('hash_code', Int), ('flag', Byte),", "('keys', AnyDataArray()), ], query_id=query_id, ) return query_struct.perform( connection, query_params={ 'hash_code':", "def cache_remove_all( connection: 'Connection', cache: Union[str, int], binary=False, query_id=None, )", "if a value is written, non-zero status and an error", "may obtain a # copy of the License at #", "\"\"\" query_struct = Query( OP_CACHE_REMOVE_KEYS, [ ('hash_code', Int), ('flag', Byte),", "from the functionality of the Software. # Any license notice", "value. :param connection: connection to Ignite server, :param cache: name", "-> 'APIResult': \"\"\" Removes all entries from cache, notifying listeners", "connection to Ignite server, :param cache: name or ID of", "{ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key':", "given value should be converted. :param binary: pass True to", "otherwise. \"\"\" query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ ('hash_code', Int), ('flag',", "OP_CACHE_GET_AND_PUT_IF_ABSENT, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject),", "Byte), ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), ],", "the Apache License, Version 2.0 (the \"License\") modified with Commons", "for whic the given sample should be converted :param binary:", "error description on failure. \"\"\" query_struct = Query( OP_CACHE_CONTAINS_KEY, [", "the number of entries in cache. 
:param connection: connection to", "is equal to actual value, notifying listeners and cache writers.", "Puts multiple key-value pairs to cache (overwriting existing associations if", "GridGain Systems, Inc., # the “License” is the Apache License,", "type parameters, contains key-value pairs to save. Each key or", "= dict(result.value)['data'] return result def cache_put_all( connection: 'Connection', cache: Union[str,", "value for the given key, :param key_hint: (optional) Ignite data", "should be converted. :param binary: pass True to keep the", "by applicable law or agreed to in writing, software distributed", "Union[str, int], key, sample, value, key_hint=None, sample_hint=None, value_hint=None, binary=False, query_id=None,", "listeners or cache writers. :param connection: connection to Ignite server,", "int], key, key_hint=None, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Retrieves", "cache entry with specified key, returning the value. :param connection:", "# Without limiting other conditions in the License, the grant", "0, 'keys': keys, }, response_config=[ ('value', Bool), ], ) if", "cache_clear( connection: 'Connection', cache: Union[str, int], binary=False, query_id=None, ) ->", "Copyright 2019 GridGain Systems, Inc., # the “License” is the", "the key already exists and value equals provided sample. :param", "0, 'key': key, 'value': value, }) def cache_get( connection: 'Connection',", "indicating whether all given keys are present in cache. :param", "ID of the cache, :param pairs: dictionary type parameters, contains", "list, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Clears the cache", "you by the Licensor under the License, as defined below,", "dictionary type parameters, contains key-value pairs to save. Each key", "description otherwise. \"\"\" query_struct = Query( OP_CACHE_CLEAR, [ ('hash_code', Int),", "value_hint or AnyDataObject), ], query_id=query_id, ) result = query_struct.perform( connection,", ") if result.status != 0: return result result.value = result.value['value']", "Contains zero status on success, non-zero status and an error", "cache: Union[str, int], keys: list, binary=False, query_id=None, ) -> 'APIResult':", "a value currently mapped for that key. :param connection: connection", ":return: API result data object. Contains zero status if key-value", "= Query( OP_CACHE_REMOVE_ALL, [ ('hash_code', Int), ('flag', Byte), ], query_id=query_id,", ") -> 'APIResult': \"\"\" Removes entries with given keys, notifying", "= result.value['success'] return result def cache_clear( connection: 'Connection', cache: Union[str,", "'hash_code': cache_id(cache), 'flag': 1 if binary else 0, }, )", "the rights granted to you # under the License to", "should be converted, :param value_hint: (optional) Ignite data type, for", ":return: API result data object. Contains zero status and a", "('key', key_hint or AnyDataObject), ], query_id=query_id, ) return query_struct.perform( connection,", "the functionality of the Software. # Any license notice or", "specific language governing permissions # and limitations under the License.", "be converted, :param sample_hint: (optional) Ignite data type, for whic", "0, 'key': key, }, response_config=[ ('value', Bool), ], ) if", "cache entry. 
Can be of any supported type, :param value:", "item of representable Python type or a tuple of (item,", "data type, for which the given key should be converted,", "Iterable, Union from pyignite.queries.op_codes import * from pyignite.datatypes import (", "Commons Clause # Restriction; you may not use this file", "result.value = result.value['value'] return result def cache_contains_keys( connection: 'Connection', cache:", "and only if there is a value currently mapped for", "a value indicating whether all given keys are present in", "data object. Contains zero status and a dict, made of", "else 0, }, ) def cache_clear_key( connection: 'Connection', cache: Union[str,", "the cache, :param keys: list of keys or tuples of", "0: result.value = result.value['success'] return result def cache_get_and_put_if_absent( connection: 'Connection',", "cache: name or ID of the cache, :param keys: a", "or a tuple of (item, hint), :param binary: (optional) pass", "cache key without notifying listeners or cache writers. :param connection:", "'key': key, }, ) def cache_clear_keys( connection: 'Connection', cache: Union[str,", "all given keys are present in cache. :param connection: connection", ":param value_hint: (optional) Ignite data type, for which the given", "the License, as defined below, subject to # the following", "object=None, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Clears the cache", "int], peek_modes=0, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Gets the", "0, 'key': key, }, ) def cache_clear_keys( connection: 'Connection', cache:", "services related to the Software), a product or # service", "key. :param connection: connection to Ignite server, :param cache: name", "without notifying listeners or cache writers. :param connection: connection to", "a value with a given key to cache, returning previous", "key: key for the cache entry, :param key_hint: (optional) Ignite", "pairs: dict, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Puts multiple", "= Query( OP_CACHE_PUT_IF_ABSENT, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint", "error description in case of error. \"\"\" query_struct = Query(", "int], keys: list, binary=False, query_id=None, ) -> 'APIResult': \"\"\" Clears", "result def cache_remove_if_equals( connection: 'Connection', cache: Union[str, int], key, sample,", "}, ) def cache_clear_keys( connection: 'Connection', cache: Union[str, int], keys:", ") -> 'APIResult': \"\"\" Removes all entries from cache, notifying", "does not grant to you, the right to Sell the", "if binary else 0, }, ) def cache_clear_key( connection: 'Connection',", "value from cache by key. :param connection: connection to Ignite", "peek_modes == 0: peek_modes = [] else: peek_modes = [peek_modes]", "('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject), ('sample', sample_hint", "0, 'keys': keys, }, ) def cache_remove_key( connection: 'Connection', cache:", "cache_id def cache_put( connection: 'Connection', cache: Union[str, int], key, value,", "= Query( OP_CACHE_REPLACE, [ ('hash_code', Int), ('flag', Byte), ('key', key_hint", "all of the rights granted to you # under the", "Licensor under the License, as defined below, subject to #", "an error description on failure. \"\"\" query_struct = Query( OP_CACHE_GET,", ":param keys: list of keys or tuples of (key, key_hint),", "binary else 0, 'keys': keys, }, ) def cache_remove_all( connection:", "'key': key, 'sample': sample, 'value': value, }, response_config=[ ('success', Bool),", "Software. 
# Any license notice or attribution required by the", "query_struct = Query( OP_CACHE_PUT_ALL, [ ('hash_code', Int), ('flag', Byte), ('data',", "cache, returning previous value for that key, if and only", "new value for the given key, :param key_hint: (optional) Ignite", "with, :param key_hint: (optional) Ignite data type, for which the", "description otherwise. \"\"\" query_struct = Query( OP_CACHE_REMOVE_KEYS, [ ('hash_code', Int),", "or backup cache (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL),", "is generated, :return: API result data object. Contains zero status", "OP_CACHE_PUT_ALL, [ ('hash_code', Int), ('flag', Byte), ('data', Map), ], query_id=query_id,", "result.status == 0: result.value = result.value['value'] return result def cache_put_if_absent(", "or value can be an item of representable Python type", "cache_put_all( connection: 'Connection', cache: Union[str, int], pairs: dict, binary=False, query_id=None,", "if binary else 0, 'keys': keys, }, ) def cache_remove_key(", "status and an error description if something has gone wrong.", "query_struct = Query( OP_CACHE_REMOVE_ALL, [ ('hash_code', Int), ('flag', Byte), ],", "partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults", "or AnyDataObject), ], query_id=query_id, ) result = query_struct.perform( connection, query_params={", "multiple key-value pairs to cache (overwriting existing associations if any).", "Query, Response from pyignite.utils import cache_id def cache_put( connection: 'Connection',", "'APIResult': \"\"\" Clears the cache without notifying listeners or cache", "if something has gone wrong. \"\"\" query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS,", "name or ID of the cache, :param pairs: dictionary type", "result.value['value'] return result def cache_get_and_remove( connection: 'Connection', cache: Union[str, int],", "License, Version 2.0, and the Software is the GridGain Community", "value_hint: (optional) Ignite data type, for which the given value", "or ID of the cache, :param keys: a list of", "should be converted. :param binary: (optional) pass True to keep", "connection: 'Connection', cache: Union[str, int], key, key_hint=None, binary=False, query_id=None, )", "key should be converted, :param value_hint: (optional) Ignite data type,", ":param key: key for the cache entry. Can be of", "result.value = result.value['success'] return result def cache_remove_if_equals( connection: 'Connection', cache:" ]
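# Illustrative sketch only, not part of the module above: given an already
# established `conn` (a pyignite Connection to an Ignite node; how it is
# created depends on the pyignite version), the low-level helpers are used
# like this. The cache name 'my_cache' and the key/value are hypothetical.
def put_and_get_example(conn):
    # write key 1 -> 'one'; zero status means the value was written
    res = cache_put(conn, 'my_cache', key=1, value='one')
    assert res.status == 0

    # read it back; on success the deserialized value is in res.value
    res = cache_get(conn, 'my_cache', key=1)
    if res.status == 0:
        return res.value
    return None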
[ "+ \"/negative\" seed_positive =path_allpairs + \"/positivee\" seed_file_na = seed_negative +", "= os.path.split(path)[0] os.chdir(path) print path device_ssh_ip = \"\" ssh_device =", "path device_ssh_ip = \"\" ssh_device = device_ssh_ip.split(\",\") path_tcs = path", "print path device_ssh_ip = \"\" ssh_device = device_ssh_ip.split(\",\") path_tcs =", "seed_file = path_allpairs + \"/positive/input_seed.txt\" seed_negative = path_allpairs + \"/negative\"", "+ \"/selfcomb.txt\" output_file = path_allpairs + \"/output.txt\" output_file_ne = path_allpairs", "= path + \"/tcs\" path_result= path + \"/result\" path_allpairs =", "path + \"/allpairs\" path_resource = path + \"/resource\" seed_file =", "\"/script\" log_path = report_path + \"/log_\" device_path = \"/home/app/content/tct/\" run_times", "os.chdir(path) print path device_ssh_ip = \"\" ssh_device = device_ssh_ip.split(\",\") path_tcs", "report_file = report_path + \"/wrt-manifest-tizen-tests.xml\" report_summary_file = report_path + \"/summary.xml\"", "import itertools, shutil path = os.path.abspath(__file__) path = os.path.split(path)[0] os.chdir(path)", "report_path = path + \"/report\" report_file = report_path + \"/wrt-manifest-tizen-tests.xml\"", "= device_ssh_ip.split(\",\") path_tcs = path + \"/tcs\" path_result= path +", "seed_negative + \"/input_seed_negative.txt\" selfcomb_file = path_allpairs + \"/selfcomb.txt\" output_file =", "\"\" ssh_device = device_ssh_ip.split(\",\") path_tcs = path + \"/tcs\" path_result=", "path_allpairs + \"/positive/input_seed.txt\" seed_negative = path_allpairs + \"/negative\" seed_positive =path_allpairs", "+ \"/output_negative.txt\" report_path = path + \"/report\" report_file = report_path", "+ \"/log_\" device_path = \"/home/app/content/tct/\" run_times = 3 version=\"6.35.1.2\" name=\"wrt-manifest-tizen-tests\"", "selfcomb_file = path_allpairs + \"/selfcomb.txt\" output_file = path_allpairs + \"/output.txt\"", "+ \"/allpairs\" path_resource = path + \"/resource\" seed_file = path_allpairs", "output_file = path_allpairs + \"/output.txt\" output_file_ne = path_allpairs + \"/output_negative.txt\"", "log_path = report_path + \"/log_\" device_path = \"/home/app/content/tct/\" run_times =", "+ \"/report\" report_file = report_path + \"/wrt-manifest-tizen-tests.xml\" report_summary_file = report_path", "\"/allpairs\" path_resource = path + \"/resource\" seed_file = path_allpairs +", "= path + \"/allpairs\" path_resource = path + \"/resource\" seed_file", "+ \"/positivee\" seed_file_na = seed_negative + \"/input_seed_negative.txt\" selfcomb_file = path_allpairs", "path_allpairs + \"/output_negative.txt\" report_path = path + \"/report\" report_file =", "path_allpairs = path + \"/allpairs\" path_resource = path + \"/resource\"", "sh_path = path + \"/script\" log_path = report_path + \"/log_\"", "= \"\" ssh_device = device_ssh_ip.split(\",\") path_tcs = path + \"/tcs\"", "path + \"/result\" path_allpairs = path + \"/allpairs\" path_resource =", "\"/negative\" seed_positive =path_allpairs + \"/positivee\" seed_file_na = seed_negative + \"/input_seed_negative.txt\"", "\"/output_negative.txt\" report_path = path + \"/report\" report_file = report_path +", "+ \"/output.txt\" output_file_ne = path_allpairs + \"/output_negative.txt\" report_path = path", "device_ssh_ip.split(\",\") path_tcs = path + \"/tcs\" path_result= path + \"/result\"", "seed_negative = path_allpairs + \"/negative\" seed_positive =path_allpairs + \"/positivee\" seed_file_na", "\"/output.txt\" output_file_ne = path_allpairs + 
\"/output_negative.txt\" report_path = path +", "= path + \"/script\" log_path = report_path + \"/log_\" device_path", "path + \"/script\" log_path = report_path + \"/log_\" device_path =", "device_ssh_ip = \"\" ssh_device = device_ssh_ip.split(\",\") path_tcs = path +", "+ \"/resource\" seed_file = path_allpairs + \"/positive/input_seed.txt\" seed_negative = path_allpairs", "path_allpairs + \"/negative\" seed_positive =path_allpairs + \"/positivee\" seed_file_na = seed_negative", "os import itertools, shutil path = os.path.abspath(__file__) path = os.path.split(path)[0]", "python import sys, os import itertools, shutil path = os.path.abspath(__file__)", "\"/resource\" seed_file = path_allpairs + \"/positive/input_seed.txt\" seed_negative = path_allpairs +", "report_summary_file = report_path + \"/summary.xml\" sh_path = path + \"/script\"", "+ \"/wrt-manifest-tizen-tests.xml\" report_summary_file = report_path + \"/summary.xml\" sh_path = path", "path = os.path.abspath(__file__) path = os.path.split(path)[0] os.chdir(path) print path device_ssh_ip", "=path_allpairs + \"/positivee\" seed_file_na = seed_negative + \"/input_seed_negative.txt\" selfcomb_file =", "= os.path.abspath(__file__) path = os.path.split(path)[0] os.chdir(path) print path device_ssh_ip =", "path_tcs = path + \"/tcs\" path_result= path + \"/result\" path_allpairs", "= path_allpairs + \"/negative\" seed_positive =path_allpairs + \"/positivee\" seed_file_na =", "sys, os import itertools, shutil path = os.path.abspath(__file__) path =", "\"/positivee\" seed_file_na = seed_negative + \"/input_seed_negative.txt\" selfcomb_file = path_allpairs +", "= report_path + \"/wrt-manifest-tizen-tests.xml\" report_summary_file = report_path + \"/summary.xml\" sh_path", "= report_path + \"/log_\" device_path = \"/home/app/content/tct/\" run_times = 3", "output_file_ne = path_allpairs + \"/output_negative.txt\" report_path = path + \"/report\"", "shutil path = os.path.abspath(__file__) path = os.path.split(path)[0] os.chdir(path) print path", "\"/selfcomb.txt\" output_file = path_allpairs + \"/output.txt\" output_file_ne = path_allpairs +", "report_path + \"/wrt-manifest-tizen-tests.xml\" report_summary_file = report_path + \"/summary.xml\" sh_path =", "path_allpairs + \"/output.txt\" output_file_ne = path_allpairs + \"/output_negative.txt\" report_path =", "path + \"/report\" report_file = report_path + \"/wrt-manifest-tizen-tests.xml\" report_summary_file =", "itertools, shutil path = os.path.abspath(__file__) path = os.path.split(path)[0] os.chdir(path) print", "report_path + \"/log_\" device_path = \"/home/app/content/tct/\" run_times = 3 version=\"6.35.1.2\"", "\"/positive/input_seed.txt\" seed_negative = path_allpairs + \"/negative\" seed_positive =path_allpairs + \"/positivee\"", "= path_allpairs + \"/output.txt\" output_file_ne = path_allpairs + \"/output_negative.txt\" report_path", "+ \"/tcs\" path_result= path + \"/result\" path_allpairs = path +", "= path_allpairs + \"/output_negative.txt\" report_path = path + \"/report\" report_file", "seed_file_na = seed_negative + \"/input_seed_negative.txt\" selfcomb_file = path_allpairs + \"/selfcomb.txt\"", "\"/summary.xml\" sh_path = path + \"/script\" log_path = report_path +", "+ \"/positive/input_seed.txt\" seed_negative = path_allpairs + \"/negative\" seed_positive =path_allpairs +", "= path + \"/report\" report_file = report_path + \"/wrt-manifest-tizen-tests.xml\" report_summary_file", "path + \"/resource\" seed_file = path_allpairs + \"/positive/input_seed.txt\" seed_negative 
=", "path_allpairs + \"/selfcomb.txt\" output_file = path_allpairs + \"/output.txt\" output_file_ne =", "+ \"/result\" path_allpairs = path + \"/allpairs\" path_resource = path", "path + \"/tcs\" path_result= path + \"/result\" path_allpairs = path", "seed_positive =path_allpairs + \"/positivee\" seed_file_na = seed_negative + \"/input_seed_negative.txt\" selfcomb_file", "= report_path + \"/summary.xml\" sh_path = path + \"/script\" log_path", "= path_allpairs + \"/positive/input_seed.txt\" seed_negative = path_allpairs + \"/negative\" seed_positive", "ssh_device = device_ssh_ip.split(\",\") path_tcs = path + \"/tcs\" path_result= path", "= path_allpairs + \"/selfcomb.txt\" output_file = path_allpairs + \"/output.txt\" output_file_ne", "\"/wrt-manifest-tizen-tests.xml\" report_summary_file = report_path + \"/summary.xml\" sh_path = path +", "+ \"/input_seed_negative.txt\" selfcomb_file = path_allpairs + \"/selfcomb.txt\" output_file = path_allpairs", "os.path.abspath(__file__) path = os.path.split(path)[0] os.chdir(path) print path device_ssh_ip = \"\"", "= seed_negative + \"/input_seed_negative.txt\" selfcomb_file = path_allpairs + \"/selfcomb.txt\" output_file", "report_path + \"/summary.xml\" sh_path = path + \"/script\" log_path =", "+ \"/summary.xml\" sh_path = path + \"/script\" log_path = report_path", "\"/result\" path_allpairs = path + \"/allpairs\" path_resource = path +", "= path + \"/resource\" seed_file = path_allpairs + \"/positive/input_seed.txt\" seed_negative", "\"/report\" report_file = report_path + \"/wrt-manifest-tizen-tests.xml\" report_summary_file = report_path +", "os.path.split(path)[0] os.chdir(path) print path device_ssh_ip = \"\" ssh_device = device_ssh_ip.split(\",\")", "path_resource = path + \"/resource\" seed_file = path_allpairs + \"/positive/input_seed.txt\"", "import sys, os import itertools, shutil path = os.path.abspath(__file__) path", "#!/usr/bin/env python import sys, os import itertools, shutil path =", "path = os.path.split(path)[0] os.chdir(path) print path device_ssh_ip = \"\" ssh_device", "path_result= path + \"/result\" path_allpairs = path + \"/allpairs\" path_resource", "\"/tcs\" path_result= path + \"/result\" path_allpairs = path + \"/allpairs\"", "\"/input_seed_negative.txt\" selfcomb_file = path_allpairs + \"/selfcomb.txt\" output_file = path_allpairs +", "+ \"/script\" log_path = report_path + \"/log_\" device_path = \"/home/app/content/tct/\"" ]
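# Hypothetical usage sketch, not from the original script: the path prefixes
# above are typically combined into per-device artifact paths, e.g. one log
# file per SSH target. The ".txt" suffix here is an assumption.
for ip in ssh_device:
    if ip:
        device_log = log_path + ip + ".txt"  # e.g. <path>/report/log_<ip>.txt
        print device_log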
[ "\"\"\"Command models to open a Thermocycler's lid.\"\"\" from __future__ import", "def __init__( self, state_view: StateView, equipment: EquipmentHandler, movement: MovementHandler, **unused_dependencies:", "not None: await thermocycler_hardware.open() return OpenLidResult() class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]): \"\"\"A", "state_view: StateView, equipment: EquipmentHandler, movement: MovementHandler, **unused_dependencies: object, ) ->", "over the trash # do not home plunger axes because", "from pydantic import BaseModel, Field from ..command import AbstractCommandImpl, BaseCommand,", "import StateView from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler OpenLidCommandType = Literal[\"thermocycler/openLid\"]", "params: OpenLidParams) -> OpenLidResult: \"\"\"Open a Thermocycler's lid.\"\"\" thermocycler_state =", "open a Thermocycler's lid.\"\"\" moduleId: str = Field(..., description=\"Unique ID", "= self._equipment.get_module_hardware_api( thermocycler_state.module_id ) # move the pipettes and gantry", "equipment self._movement = movement async def execute(self, params: OpenLidParams) ->", "Thermocycler's lid.\"\"\" class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]): \"\"\"Execution implementation of a Thermocycler's", "lid.\"\"\" thermocycler_state = self._state_view.modules.get_thermocycler_module_substate( params.moduleId ) thermocycler_hardware = self._equipment.get_module_hardware_api( thermocycler_state.module_id", "pipettes may be holding liquid await self._movement.home( [ MotorAxis.X, MotorAxis.Y,", "MotorAxis.Y, MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z, ] ) if thermocycler_hardware is not None:", "Optional[OpenLidResult] _ImplementationCls: Type[OpenLidImpl] = OpenLidImpl class OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A request to", "OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]): \"\"\"Execution implementation of a Thermocycler's open lid command.\"\"\"", "params: OpenLidParams result: Optional[OpenLidResult] _ImplementationCls: Type[OpenLidImpl] = OpenLidImpl class OpenLidCreate(BaseCommandCreate[OpenLidParams]):", "\"\"\"Execution implementation of a Thermocycler's open lid command.\"\"\" def __init__(", "axes because pipettes may be holding liquid await self._movement.home( [", "<reponame>Opentrons/protocol_framework \"\"\"Command models to open a Thermocycler's lid.\"\"\" from __future__", "commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams result: Optional[OpenLidResult] _ImplementationCls: Type[OpenLidImpl]", "OpenLidParams(BaseModel): \"\"\"Input parameters to open a Thermocycler's lid.\"\"\" moduleId: str", "params.moduleId ) thermocycler_hardware = self._equipment.get_module_hardware_api( thermocycler_state.module_id ) # move the", "class OpenLidParams(BaseModel): \"\"\"Input parameters to open a Thermocycler's lid.\"\"\" moduleId:", "a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams result:", "class OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A request to open a Thermocycler's lid.\"\"\" commandType:", "the Thermocycler.\") class OpenLidResult(BaseModel): \"\"\"Result data from opening a Thermocycler's", "= Field(..., description=\"Unique ID of the Thermocycler.\") class OpenLidResult(BaseModel): \"\"\"Result", "MotorAxis if TYPE_CHECKING: from opentrons.protocol_engine.state import StateView from opentrons.protocol_engine.execution import", "import Literal, Type from pydantic 
import BaseModel, Field from ..command", "= Literal[\"thermocycler/openLid\"] class OpenLidParams(BaseModel): \"\"\"Input parameters to open a Thermocycler's", "moduleId: str = Field(..., description=\"Unique ID of the Thermocycler.\") class", "implementation of a Thermocycler's open lid command.\"\"\" def __init__( self,", "movement: MovementHandler, **unused_dependencies: object, ) -> None: self._state_view = state_view", "state_view self._equipment = equipment self._movement = movement async def execute(self,", "thermocycler_hardware.open() return OpenLidResult() class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]): \"\"\"A command to open", "Type[OpenLidImpl] = OpenLidImpl class OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A request to open a", "a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams _CommandCls:", "= state_view self._equipment = equipment self._movement = movement async def", "a Thermocycler's lid.\"\"\" class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]): \"\"\"Execution implementation of a", "models to open a Thermocycler's lid.\"\"\" from __future__ import annotations", "Literal, Type from pydantic import BaseModel, Field from ..command import", "lid.\"\"\" from __future__ import annotations from typing import Optional, TYPE_CHECKING", "thermocycler_hardware = self._equipment.get_module_hardware_api( thermocycler_state.module_id ) # move the pipettes and", "Thermocycler's lid.\"\"\" moduleId: str = Field(..., description=\"Unique ID of the", "MotorAxis.LEFT_Z, ] ) if thermocycler_hardware is not None: await thermocycler_hardware.open()", ") thermocycler_hardware = self._equipment.get_module_hardware_api( thermocycler_state.module_id ) # move the pipettes", "async def execute(self, params: OpenLidParams) -> OpenLidResult: \"\"\"Open a Thermocycler's", "= OpenLidImpl class OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A request to open a Thermocycler's", "from opentrons.protocol_engine.state import StateView from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler OpenLidCommandType", "StateView from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler OpenLidCommandType = Literal[\"thermocycler/openLid\"] class", "to open a Thermocycler's lid.\"\"\" moduleId: str = Field(..., description=\"Unique", "self._state_view = state_view self._equipment = equipment self._movement = movement async", "movement async def execute(self, params: OpenLidParams) -> OpenLidResult: \"\"\"Open a", "_ImplementationCls: Type[OpenLidImpl] = OpenLidImpl class OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A request to open", "move the pipettes and gantry over the trash # do", "liquid await self._movement.home( [ MotorAxis.X, MotorAxis.Y, MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z, ] )", "import annotations from typing import Optional, TYPE_CHECKING from typing_extensions import", "OpenLidResult]): \"\"\"Execution implementation of a Thermocycler's open lid command.\"\"\" def", "Thermocycler's lid.\"\"\" thermocycler_state = self._state_view.modules.get_thermocycler_module_substate( params.moduleId ) thermocycler_hardware = self._equipment.get_module_hardware_api(", "is not None: await thermocycler_hardware.open() return OpenLidResult() class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]):", "Literal[\"thermocycler/openLid\"] class OpenLidParams(BaseModel): \"\"\"Input parameters to open a Thermocycler's lid.\"\"\"", "object, ) -> None: 
self._state_view = state_view self._equipment = equipment", "-> None: self._state_view = state_view self._equipment = equipment self._movement =", "of a Thermocycler's open lid command.\"\"\" def __init__( self, state_view:", "BaseCommand, BaseCommandCreate from opentrons.protocol_engine.types import MotorAxis if TYPE_CHECKING: from opentrons.protocol_engine.state", "None: self._state_view = state_view self._equipment = equipment self._movement = movement", "thermocycler_state = self._state_view.modules.get_thermocycler_module_substate( params.moduleId ) thermocycler_hardware = self._equipment.get_module_hardware_api( thermocycler_state.module_id )", "self, state_view: StateView, equipment: EquipmentHandler, movement: MovementHandler, **unused_dependencies: object, )", "not home plunger axes because pipettes may be holding liquid", "MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z, ] ) if thermocycler_hardware is not None: await", "\"\"\"Open a Thermocycler's lid.\"\"\" thermocycler_state = self._state_view.modules.get_thermocycler_module_substate( params.moduleId ) thermocycler_hardware", "result: Optional[OpenLidResult] _ImplementationCls: Type[OpenLidImpl] = OpenLidImpl class OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A request", ") # move the pipettes and gantry over the trash", "= movement async def execute(self, params: OpenLidParams) -> OpenLidResult: \"\"\"Open", "self._movement = movement async def execute(self, params: OpenLidParams) -> OpenLidResult:", "from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate from opentrons.protocol_engine.types import MotorAxis", "lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams result: Optional[OpenLidResult] _ImplementationCls:", "a Thermocycler's lid.\"\"\" thermocycler_state = self._state_view.modules.get_thermocycler_module_substate( params.moduleId ) thermocycler_hardware =", "EquipmentHandler, movement: MovementHandler, **unused_dependencies: object, ) -> None: self._state_view =", "thermocycler_hardware is not None: await thermocycler_hardware.open() return OpenLidResult() class OpenLid(BaseCommand[OpenLidParams,", "pipettes and gantry over the trash # do not home", "class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]): \"\"\"Execution implementation of a Thermocycler's open lid", "import Optional, TYPE_CHECKING from typing_extensions import Literal, Type from pydantic", "Field from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate from opentrons.protocol_engine.types import", "import EquipmentHandler, MovementHandler OpenLidCommandType = Literal[\"thermocycler/openLid\"] class OpenLidParams(BaseModel): \"\"\"Input parameters", "typing import Optional, TYPE_CHECKING from typing_extensions import Literal, Type from", "a Thermocycler's lid.\"\"\" moduleId: str = Field(..., description=\"Unique ID of", "opening a Thermocycler's lid.\"\"\" class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]): \"\"\"Execution implementation of", "annotations from typing import Optional, TYPE_CHECKING from typing_extensions import Literal,", "= \"thermocycler/openLid\" params: OpenLidParams result: Optional[OpenLidResult] _ImplementationCls: Type[OpenLidImpl] = OpenLidImpl", "do not home plunger axes because pipettes may be holding", "from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler OpenLidCommandType = Literal[\"thermocycler/openLid\"] class OpenLidParams(BaseModel):", "the pipettes and gantry over the trash # do 
not", "because pipettes may be holding liquid await self._movement.home( [ MotorAxis.X,", "# do not home plunger axes because pipettes may be", "to open a Thermocycler's lid.\"\"\" from __future__ import annotations from", "TYPE_CHECKING from typing_extensions import Literal, Type from pydantic import BaseModel,", "BaseModel, Field from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate from opentrons.protocol_engine.types", "**unused_dependencies: object, ) -> None: self._state_view = state_view self._equipment =", "thermocycler_state.module_id ) # move the pipettes and gantry over the", "OpenLidResult() class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]): \"\"\"A command to open a Thermocycler's", "ID of the Thermocycler.\") class OpenLidResult(BaseModel): \"\"\"Result data from opening", "__future__ import annotations from typing import Optional, TYPE_CHECKING from typing_extensions", "commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams _CommandCls: Type[OpenLid] = OpenLid", "EquipmentHandler, MovementHandler OpenLidCommandType = Literal[\"thermocycler/openLid\"] class OpenLidParams(BaseModel): \"\"\"Input parameters to", "open a Thermocycler's lid.\"\"\" from __future__ import annotations from typing", "self._movement.home( [ MotorAxis.X, MotorAxis.Y, MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z, ] ) if thermocycler_hardware", "from opentrons.protocol_engine.types import MotorAxis if TYPE_CHECKING: from opentrons.protocol_engine.state import StateView", "str = Field(..., description=\"Unique ID of the Thermocycler.\") class OpenLidResult(BaseModel):", "data from opening a Thermocycler's lid.\"\"\" class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]): \"\"\"Execution", "\"\"\"A command to open a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType =", "Type from pydantic import BaseModel, Field from ..command import AbstractCommandImpl,", "open lid command.\"\"\" def __init__( self, state_view: StateView, equipment: EquipmentHandler,", "\"thermocycler/openLid\" params: OpenLidParams result: Optional[OpenLidResult] _ImplementationCls: Type[OpenLidImpl] = OpenLidImpl class", ") if thermocycler_hardware is not None: await thermocycler_hardware.open() return OpenLidResult()", "OpenLid(BaseCommand[OpenLidParams, OpenLidResult]): \"\"\"A command to open a Thermocycler's lid.\"\"\" commandType:", "OpenLidResult]): \"\"\"A command to open a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType", "of the Thermocycler.\") class OpenLidResult(BaseModel): \"\"\"Result data from opening a", "gantry over the trash # do not home plunger axes", "return OpenLidResult() class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]): \"\"\"A command to open a", "execute(self, params: OpenLidParams) -> OpenLidResult: \"\"\"Open a Thermocycler's lid.\"\"\" thermocycler_state", "Thermocycler's open lid command.\"\"\" def __init__( self, state_view: StateView, equipment:", "await thermocycler_hardware.open() return OpenLidResult() class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]): \"\"\"A command to", "self._state_view.modules.get_thermocycler_module_substate( params.moduleId ) thermocycler_hardware = self._equipment.get_module_hardware_api( thermocycler_state.module_id ) # move", "OpenLidResult(BaseModel): \"\"\"Result data from opening a Thermocycler's lid.\"\"\" class OpenLidImpl(AbstractCommandImpl[OpenLidParams,", "Optional, TYPE_CHECKING from typing_extensions import Literal, Type from pydantic import", "class OpenLidResult(BaseModel): 
\"\"\"Result data from opening a Thermocycler's lid.\"\"\" class", "lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams _CommandCls: Type[OpenLid] =", "import BaseModel, Field from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate from", "opentrons.protocol_engine.types import MotorAxis if TYPE_CHECKING: from opentrons.protocol_engine.state import StateView from", "= equipment self._movement = movement async def execute(self, params: OpenLidParams)", "from __future__ import annotations from typing import Optional, TYPE_CHECKING from", "the trash # do not home plunger axes because pipettes", "opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler OpenLidCommandType = Literal[\"thermocycler/openLid\"] class OpenLidParams(BaseModel): \"\"\"Input", "equipment: EquipmentHandler, movement: MovementHandler, **unused_dependencies: object, ) -> None: self._state_view", "None: await thermocycler_hardware.open() return OpenLidResult() class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]): \"\"\"A command", "trash # do not home plunger axes because pipettes may", "home plunger axes because pipettes may be holding liquid await", "\"\"\"Result data from opening a Thermocycler's lid.\"\"\" class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]):", "lid command.\"\"\" def __init__( self, state_view: StateView, equipment: EquipmentHandler, movement:", "OpenLidParams result: Optional[OpenLidResult] _ImplementationCls: Type[OpenLidImpl] = OpenLidImpl class OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A", "lid.\"\"\" class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]): \"\"\"Execution implementation of a Thermocycler's open", "await self._movement.home( [ MotorAxis.X, MotorAxis.Y, MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z, ] ) if", "OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A request to open a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType", "command to open a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\"", "Thermocycler's lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams _CommandCls: Type[OpenLid]", "to open a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\" params:", "Field(..., description=\"Unique ID of the Thermocycler.\") class OpenLidResult(BaseModel): \"\"\"Result data", "\"\"\"Input parameters to open a Thermocycler's lid.\"\"\" moduleId: str =", "typing_extensions import Literal, Type from pydantic import BaseModel, Field from", "plunger axes because pipettes may be holding liquid await self._movement.home(", "__init__( self, state_view: StateView, equipment: EquipmentHandler, movement: MovementHandler, **unused_dependencies: object,", "class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]): \"\"\"A command to open a Thermocycler's lid.\"\"\"", "OpenLidResult: \"\"\"Open a Thermocycler's lid.\"\"\" thermocycler_state = self._state_view.modules.get_thermocycler_module_substate( params.moduleId )", "AbstractCommandImpl, BaseCommand, BaseCommandCreate from opentrons.protocol_engine.types import MotorAxis if TYPE_CHECKING: from", "MotorAxis.X, MotorAxis.Y, MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z, ] ) if thermocycler_hardware is not", "holding liquid await self._movement.home( [ MotorAxis.X, MotorAxis.Y, MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z, ]", "import AbstractCommandImpl, BaseCommand, BaseCommandCreate from opentrons.protocol_engine.types import 
MotorAxis if TYPE_CHECKING:", "Thermocycler's lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams result: Optional[OpenLidResult]", "and gantry over the trash # do not home plunger", "Thermocycler's lid.\"\"\" from __future__ import annotations from typing import Optional,", "[ MotorAxis.X, MotorAxis.Y, MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z, ] ) if thermocycler_hardware is", "a Thermocycler's open lid command.\"\"\" def __init__( self, state_view: StateView,", "OpenLidParams) -> OpenLidResult: \"\"\"Open a Thermocycler's lid.\"\"\" thermocycler_state = self._state_view.modules.get_thermocycler_module_substate(", ") -> None: self._state_view = state_view self._equipment = equipment self._movement", "from typing_extensions import Literal, Type from pydantic import BaseModel, Field", "if thermocycler_hardware is not None: await thermocycler_hardware.open() return OpenLidResult() class", "lid.\"\"\" moduleId: str = Field(..., description=\"Unique ID of the Thermocycler.\")", "a Thermocycler's lid.\"\"\" from __future__ import annotations from typing import", "from opening a Thermocycler's lid.\"\"\" class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]): \"\"\"Execution implementation", "import MotorAxis if TYPE_CHECKING: from opentrons.protocol_engine.state import StateView from opentrons.protocol_engine.execution", "TYPE_CHECKING: from opentrons.protocol_engine.state import StateView from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler", "-> OpenLidResult: \"\"\"Open a Thermocycler's lid.\"\"\" thermocycler_state = self._state_view.modules.get_thermocycler_module_substate( params.moduleId", "pydantic import BaseModel, Field from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate", "OpenLidImpl class OpenLidCreate(BaseCommandCreate[OpenLidParams]): \"\"\"A request to open a Thermocycler's lid.\"\"\"", "OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams result: Optional[OpenLidResult] _ImplementationCls: Type[OpenLidImpl] =", "be holding liquid await self._movement.home( [ MotorAxis.X, MotorAxis.Y, MotorAxis.RIGHT_Z, MotorAxis.LEFT_Z,", "parameters to open a Thermocycler's lid.\"\"\" moduleId: str = Field(...,", "request to open a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\"", "MovementHandler OpenLidCommandType = Literal[\"thermocycler/openLid\"] class OpenLidParams(BaseModel): \"\"\"Input parameters to open", "may be holding liquid await self._movement.home( [ MotorAxis.X, MotorAxis.Y, MotorAxis.RIGHT_Z,", "\"\"\"A request to open a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType =", "] ) if thermocycler_hardware is not None: await thermocycler_hardware.open() return", "command.\"\"\" def __init__( self, state_view: StateView, equipment: EquipmentHandler, movement: MovementHandler,", "description=\"Unique ID of the Thermocycler.\") class OpenLidResult(BaseModel): \"\"\"Result data from", "MovementHandler, **unused_dependencies: object, ) -> None: self._state_view = state_view self._equipment", "= self._state_view.modules.get_thermocycler_module_substate( params.moduleId ) thermocycler_hardware = self._equipment.get_module_hardware_api( thermocycler_state.module_id ) #", "self._equipment.get_module_hardware_api( thermocycler_state.module_id ) # move the pipettes and gantry over", "Thermocycler.\") class OpenLidResult(BaseModel): \"\"\"Result data from opening a Thermocycler's lid.\"\"\"", "BaseCommandCreate from 
opentrons.protocol_engine.types import MotorAxis if TYPE_CHECKING: from opentrons.protocol_engine.state import", "OpenLidCommandType = Literal[\"thermocycler/openLid\"] class OpenLidParams(BaseModel): \"\"\"Input parameters to open a", "def execute(self, params: OpenLidParams) -> OpenLidResult: \"\"\"Open a Thermocycler's lid.\"\"\"", "self._equipment = equipment self._movement = movement async def execute(self, params:", "# move the pipettes and gantry over the trash #", "opentrons.protocol_engine.state import StateView from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler OpenLidCommandType =", "from typing import Optional, TYPE_CHECKING from typing_extensions import Literal, Type", "open a Thermocycler's lid.\"\"\" commandType: OpenLidCommandType = \"thermocycler/openLid\" params: OpenLidParams", "..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate from opentrons.protocol_engine.types import MotorAxis if", "if TYPE_CHECKING: from opentrons.protocol_engine.state import StateView from opentrons.protocol_engine.execution import EquipmentHandler,", "StateView, equipment: EquipmentHandler, movement: MovementHandler, **unused_dependencies: object, ) -> None:" ]
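# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how the request model above can be built by hand; the
# module ID string is an invented placeholder. In a real run these objects are
# created and dispatched by the protocol engine rather than instantiated here.
if __name__ == "__main__":
    example_params = OpenLidParams(moduleId="example-thermocycler-id")
    # pydantic models expose .dict() for quick inspection of the payload
    print(example_params.dict())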
[ "characters inside a text string :param text: :param chars_to_mapping: :return:", "inside a text string :param text: :param chars_to_mapping: :return: \"\"\"", "This function is used to replace a dictionary of characters", "text: :param chars_to_mapping: :return: \"\"\" import re pattern = \"|\".join(map(re.escape,", "text string :param text: :param chars_to_mapping: :return: \"\"\" import re", "to replace a dictionary of characters inside a text string", "dictionary of characters inside a text string :param text: :param", ":param chars_to_mapping: :return: \"\"\" import re pattern = \"|\".join(map(re.escape, chars_to_mapping.keys()))", "a text string :param text: :param chars_to_mapping: :return: \"\"\" import", "string :param text: :param chars_to_mapping: :return: \"\"\" import re pattern", "chars_to_mapping: :return: \"\"\" import re pattern = \"|\".join(map(re.escape, chars_to_mapping.keys())) return", "a dictionary of characters inside a text string :param text:", ":param text: :param chars_to_mapping: :return: \"\"\" import re pattern =", "used to replace a dictionary of characters inside a text", "\"\"\" This function is used to replace a dictionary of", "dict): \"\"\" This function is used to replace a dictionary", "re pattern = \"|\".join(map(re.escape, chars_to_mapping.keys())) return re.sub(pattern, lambda m: chars_to_mapping[m.group()],", "is used to replace a dictionary of characters inside a", "function is used to replace a dictionary of characters inside", "def multiple_replace(text: str, chars_to_mapping: dict): \"\"\" This function is used", "multiple_replace(text: str, chars_to_mapping: dict): \"\"\" This function is used to", "of characters inside a text string :param text: :param chars_to_mapping:", "chars_to_mapping: dict): \"\"\" This function is used to replace a", "replace a dictionary of characters inside a text string :param", "\"\"\" import re pattern = \"|\".join(map(re.escape, chars_to_mapping.keys())) return re.sub(pattern, lambda", "import re pattern = \"|\".join(map(re.escape, chars_to_mapping.keys())) return re.sub(pattern, lambda m:", "pattern = \"|\".join(map(re.escape, chars_to_mapping.keys())) return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text))", ":return: \"\"\" import re pattern = \"|\".join(map(re.escape, chars_to_mapping.keys())) return re.sub(pattern,", "str, chars_to_mapping: dict): \"\"\" This function is used to replace" ]
[ "models here. class Gamedoc(models.Model): link = models.URLField(max_length=500) title = models.CharField(max_length=500)", "= models.URLField(max_length=500) title = models.CharField(max_length=500) repo_name = models.CharField(max_length=512, blank=True, null=True)", "link = models.URLField(max_length=500) title = models.CharField(max_length=500) repo_name = models.CharField(max_length=512, blank=True,", "repo_name = models.CharField(max_length=512, blank=True, null=True) user_name = models.CharField(max_length=512, blank=True, null=True)", "<reponame>mehrbodjavadi79/AIC21-Backend<filename>apps/gamedoc/models.py from django.db import models # Create your models here.", "null=True) user_name = models.CharField(max_length=512, blank=True, null=True) def __str__(self): return f'{self.title}'", "import models # Create your models here. class Gamedoc(models.Model): link", "= models.CharField(max_length=512, blank=True, null=True) user_name = models.CharField(max_length=512, blank=True, null=True) def", "here. class Gamedoc(models.Model): link = models.URLField(max_length=500) title = models.CharField(max_length=500) repo_name", "= models.CharField(max_length=500) repo_name = models.CharField(max_length=512, blank=True, null=True) user_name = models.CharField(max_length=512,", "models.CharField(max_length=512, blank=True, null=True) user_name = models.CharField(max_length=512, blank=True, null=True) def __str__(self):", "Gamedoc(models.Model): link = models.URLField(max_length=500) title = models.CharField(max_length=500) repo_name = models.CharField(max_length=512,", "models.URLField(max_length=500) title = models.CharField(max_length=500) repo_name = models.CharField(max_length=512, blank=True, null=True) user_name", "Create your models here. class Gamedoc(models.Model): link = models.URLField(max_length=500) title", "your models here. class Gamedoc(models.Model): link = models.URLField(max_length=500) title =", "models.CharField(max_length=500) repo_name = models.CharField(max_length=512, blank=True, null=True) user_name = models.CharField(max_length=512, blank=True,", "blank=True, null=True) user_name = models.CharField(max_length=512, blank=True, null=True) def __str__(self): return", "class Gamedoc(models.Model): link = models.URLField(max_length=500) title = models.CharField(max_length=500) repo_name =", "title = models.CharField(max_length=500) repo_name = models.CharField(max_length=512, blank=True, null=True) user_name =", "models # Create your models here. class Gamedoc(models.Model): link =", "django.db import models # Create your models here. class Gamedoc(models.Model):", "from django.db import models # Create your models here. class", "# Create your models here. class Gamedoc(models.Model): link = models.URLField(max_length=500)" ]
[ "import admin from users.models import Friendship admin.site.register(Friendship) # Register your", "admin from users.models import Friendship admin.site.register(Friendship) # Register your models", "from users.models import Friendship admin.site.register(Friendship) # Register your models here.", "from django.contrib import admin from users.models import Friendship admin.site.register(Friendship) #", "django.contrib import admin from users.models import Friendship admin.site.register(Friendship) # Register" ]
[ "%s' % doc.paragraphs[1].runs[0].text) print('2nd paragraph run: %s' % doc.paragraphs[1].runs[1].text) print('3rd", "print('3rd paragraph run: %s' % doc.paragraphs[1].runs[2].text) print('4th paragraph run: %s'", "number: %s' % len(doc.paragraphs)) print('1st paragraph: %s' % doc.paragraphs[0].text) print('2nd", "%s' % len(doc.paragraphs)) print('1st paragraph: %s' % doc.paragraphs[0].text) print('2nd paragraph:", "print('1st paragraph: %s' % doc.paragraphs[0].text) print('2nd paragraph: %s' % doc.paragraphs[1].text)", "%s' % len(doc.paragraphs[1].runs)) print('1st paragraph run: %s' % doc.paragraphs[1].runs[0].text) print('2nd", "paragraph run: %s' % doc.paragraphs[1].runs[0].text) print('2nd paragraph run: %s' %", "= docx.Document('demo.docx') print('paragraphs number: %s' % len(doc.paragraphs)) print('1st paragraph: %s'", "len(doc.paragraphs[1].runs)) print('1st paragraph run: %s' % doc.paragraphs[1].runs[0].text) print('2nd paragraph run:", "% doc.paragraphs[1].runs[0].text) print('2nd paragraph run: %s' % doc.paragraphs[1].runs[1].text) print('3rd paragraph", "print('paragraphs number: %s' % len(doc.paragraphs)) print('1st paragraph: %s' % doc.paragraphs[0].text)", "docx.Document('demo.docx') print('paragraphs number: %s' % len(doc.paragraphs)) print('1st paragraph: %s' %", "paragraph run: %s' % doc.paragraphs[1].runs[2].text) print('4th paragraph run: %s' %", "%s' % doc.paragraphs[1].runs[1].text) print('3rd paragraph run: %s' % doc.paragraphs[1].runs[2].text) print('4th", "print('1st paragraph run: %s' % doc.paragraphs[1].runs[0].text) print('2nd paragraph run: %s'", "paragraph run: %s' % doc.paragraphs[1].runs[1].text) print('3rd paragraph run: %s' %", "docx doc = docx.Document('demo.docx') print('paragraphs number: %s' % len(doc.paragraphs)) print('1st", "run: %s' % doc.paragraphs[1].runs[0].text) print('2nd paragraph run: %s' % doc.paragraphs[1].runs[1].text)", "paragraph: %s' % doc.paragraphs[1].text) print('paragraphs runs: %s' % len(doc.paragraphs[1].runs)) print('1st", "print('2nd paragraph run: %s' % doc.paragraphs[1].runs[1].text) print('3rd paragraph run: %s'", "doc.paragraphs[1].text) print('paragraphs runs: %s' % len(doc.paragraphs[1].runs)) print('1st paragraph run: %s'", "doc.paragraphs[0].text) print('2nd paragraph: %s' % doc.paragraphs[1].text) print('paragraphs runs: %s' %", "run: %s' % doc.paragraphs[1].runs[1].text) print('3rd paragraph run: %s' % doc.paragraphs[1].runs[2].text)", "doc = docx.Document('demo.docx') print('paragraphs number: %s' % len(doc.paragraphs)) print('1st paragraph:", "doc.paragraphs[1].runs[0].text) print('2nd paragraph run: %s' % doc.paragraphs[1].runs[1].text) print('3rd paragraph run:", "%s' % doc.paragraphs[1].text) print('paragraphs runs: %s' % len(doc.paragraphs[1].runs)) print('1st paragraph", "% len(doc.paragraphs[1].runs)) print('1st paragraph run: %s' % doc.paragraphs[1].runs[0].text) print('2nd paragraph", "import docx doc = docx.Document('demo.docx') print('paragraphs number: %s' % len(doc.paragraphs))", "% len(doc.paragraphs)) print('1st paragraph: %s' % doc.paragraphs[0].text) print('2nd paragraph: %s'", "% doc.paragraphs[1].runs[1].text) print('3rd paragraph run: %s' % doc.paragraphs[1].runs[2].text) print('4th paragraph", "% doc.paragraphs[1].text) print('paragraphs runs: %s' % len(doc.paragraphs[1].runs)) print('1st paragraph run:", "len(doc.paragraphs)) print('1st paragraph: %s' % doc.paragraphs[0].text) print('2nd paragraph: %s' %", "%s' % doc.paragraphs[0].text) print('2nd paragraph: %s' % doc.paragraphs[1].text) 
print('paragraphs runs:", "% doc.paragraphs[0].text) print('2nd paragraph: %s' % doc.paragraphs[1].text) print('paragraphs runs: %s'", "print('2nd paragraph: %s' % doc.paragraphs[1].text) print('paragraphs runs: %s' % len(doc.paragraphs[1].runs))", "print('paragraphs runs: %s' % len(doc.paragraphs[1].runs)) print('1st paragraph run: %s' %", "doc.paragraphs[1].runs[1].text) print('3rd paragraph run: %s' % doc.paragraphs[1].runs[2].text) print('4th paragraph run:", "run: %s' % doc.paragraphs[1].runs[2].text) print('4th paragraph run: %s' % doc.paragraphs[1].runs[3].text)", "runs: %s' % len(doc.paragraphs[1].runs)) print('1st paragraph run: %s' % doc.paragraphs[1].runs[0].text)", "paragraph: %s' % doc.paragraphs[0].text) print('2nd paragraph: %s' % doc.paragraphs[1].text) print('paragraphs" ]
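# --- Illustrative extension (not part of the original demo) ---
# Indexing runs by position as above raises IndexError when a paragraph has
# fewer runs; a more general sketch iterates whatever paragraphs and runs the
# document actually contains.
for i, paragraph in enumerate(doc.paragraphs):
    print('paragraph %d: %s' % (i, paragraph.text))
    for j, run in enumerate(paragraph.runs):
        print('  run %d: %s' % (j, run.text))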
[ "BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew from BD.util.paths import Paths def", "simulations of QL, QLP and GQL. from BD.sim.sims import sims_analysis,", "== '__main__': sims_analysis_BD() sims_analysis_GQL_BD() data = merge_sim_files(lambda x: True, Paths.rest_path", "sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda conf: True ) def sims_analysis_GQL_BD():", "output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file, header=True) data = merge_sim_files(lambda", "import sims_analysis, merge_sim_files, extract_run_rew from BD.util.paths import Paths def sims_analysis_BD():", "+ 'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda conf: True ) input_folder = Paths.rest_path +", "data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials =", "= Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda conf:", ") def sims_analysis_GQL_BD(): input_folder = Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder, Paths.local_path", "sims_analysis_GQL_BD(): input_folder = Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv',", "lambda conf: True ) input_folder = Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder,", "merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials = extract_run_rew(data) output_file", "BD.util.paths import Paths def sims_analysis_BD(): input_folder = Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/'", "'__main__': sims_analysis_BD() sims_analysis_GQL_BD() data = merge_sim_files(lambda x: True, Paths.rest_path +", "data generated from on policy simulations of QL, QLP and", "from on policy simulations of QL, QLP and GQL. 
from", "if __name__ == '__main__': sims_analysis_BD() sims_analysis_GQL_BD() data = merge_sim_files(lambda x:", "import Paths def sims_analysis_BD(): input_folder = Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder,", "+ 'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda conf: True ) def sims_analysis_GQL_BD(): input_folder =", "the data generated from on policy simulations of QL, QLP", "__name__ == '__main__': sims_analysis_BD() sims_analysis_GQL_BD() data = merge_sim_files(lambda x: True,", "merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/') all_trials = extract_run_rew(data) output_file", "sims_analysis_GQL_BD() data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials", ") input_folder = Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv',", "from BD.util.paths import Paths def sims_analysis_BD(): input_folder = Paths.rest_path +", "= merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/') all_trials = extract_run_rew(data)", "input_folder = Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda", "= Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda conf:", "sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda conf: True ) if __name__", "'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda conf: True ) input_folder", "x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials = extract_run_rew(data) output_file =", "+ 'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda conf: True )", "all_trials = extract_run_rew(data) output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file, header=True)", "lambda conf: True ) def sims_analysis_GQL_BD(): input_folder = Paths.rest_path +", "input_folder = Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda", "True ) input_folder = Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder, Paths.local_path +", "QL, QLP and GQL. 
from BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew", "'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda conf: True ) if __name__ == '__main__': sims_analysis_BD()", "= merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials = extract_run_rew(data)", "+ 'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda conf: True )", "'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda conf: True ) input_folder = Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/'", "def sims_analysis_BD(): input_folder = Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder, Paths.local_path +", "Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda conf: True", "= Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda conf:", "+ 'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda conf: True )", "+ 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda conf: True ) if __name__ == '__main__':", "Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda conf: True", "'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file, header=True) data = merge_sim_files(lambda x: True, Paths.rest_path +", "Analysis the data generated from on policy simulations of QL,", "input_folder = Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda", "'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda conf: True ) def", "lambda conf: True ) if __name__ == '__main__': sims_analysis_BD() sims_analysis_GQL_BD()", "all_trials = extract_run_rew(data) output_file = Paths.local_path + 'BD/to_graph_data/gql_all_data_ml.csv' all_trials.to_csv(output_file, header=True)", "<reponame>kylmcgr/RL-RNN-SURF<filename>src/GL/sim/gql_ql_sims_ml_analysis.py # Analysis the data generated from on policy simulations", "'archive/beh/gql-ml-opt/gql-ml/') all_trials = extract_run_rew(data) output_file = Paths.local_path + 'BD/to_graph_data/gql_all_data_ml.csv' all_trials.to_csv(output_file,", "GQL. from BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew from BD.util.paths import", "sims_analysis_BD(): input_folder = Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv',", "Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/') all_trials = extract_run_rew(data) output_file = Paths.local_path +", "Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file, header=True) data = merge_sim_files(lambda x: True,", "on policy simulations of QL, QLP and GQL. 
from BD.sim.sims", "+ 'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials = extract_run_rew(data) output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv'", "data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/') all_trials =", "+ 'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda conf: True )", "Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda conf: True ) input_folder = Paths.rest_path", "'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials = extract_run_rew(data) output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file,", "'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda conf: True ) if", "sims_analysis_BD() sims_analysis_GQL_BD() data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/')", "Paths def sims_analysis_BD(): input_folder = Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder, Paths.local_path", "of QL, QLP and GQL. from BD.sim.sims import sims_analysis, merge_sim_files,", "input_folder = Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda", "all_trials.to_csv(output_file, header=True) data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/')", "generated from on policy simulations of QL, QLP and GQL.", "x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/') all_trials = extract_run_rew(data) output_file =", "conf: True ) def sims_analysis_GQL_BD(): input_folder = Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/'", "True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/') all_trials = extract_run_rew(data) output_file = Paths.local_path", "Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda conf: True ) input_folder = Paths.rest_path", ") input_folder = Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv',", "def sims_analysis_GQL_BD(): input_folder = Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder, Paths.local_path +", "True ) def sims_analysis_GQL_BD(): input_folder = Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder,", "+ 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda conf: True ) input_folder = Paths.rest_path +", "conf: True ) input_folder = Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder, Paths.local_path", "from BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew from BD.util.paths import Paths", "sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda conf: True ) input_folder =", "# Analysis the data generated from on policy simulations of", "True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials = extract_run_rew(data) output_file = Paths.local_path", "True ) input_folder = Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder, Paths.local_path +", "= Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file, header=True) data = merge_sim_files(lambda x:", "conf: True ) if __name__ == '__main__': sims_analysis_BD() 
sims_analysis_GQL_BD() data", "'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda conf: True ) input_folder", "QLP and GQL. from BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew from", "Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda conf: True", "Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/') all_trials = extract_run_rew(data) output_file = Paths.local_path +", "policy simulations of QL, QLP and GQL. from BD.sim.sims import", "extract_run_rew from BD.util.paths import Paths def sims_analysis_BD(): input_folder = Paths.rest_path", "sims_analysis, merge_sim_files, extract_run_rew from BD.util.paths import Paths def sims_analysis_BD(): input_folder", ") if __name__ == '__main__': sims_analysis_BD() sims_analysis_GQL_BD() data = merge_sim_files(lambda", "merge_sim_files, extract_run_rew from BD.util.paths import Paths def sims_analysis_BD(): input_folder =", "and GQL. from BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew from BD.util.paths", "= Paths.rest_path + 'archive/beh/qlp-ml-opt/qlp-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda conf:", "conf: True ) input_folder = Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/' sims_analysis(input_folder, Paths.local_path", "sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/qlp_ml_onpolicy__stats.csv', lambda conf: True ) input_folder =", "= extract_run_rew(data) output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file, header=True) data", "extract_run_rew(data) output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file, header=True) data =", "'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda conf: True ) def sims_analysis_GQL_BD(): input_folder = Paths.rest_path", "header=True) data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/') all_trials", "Paths.local_path + 'BD/to_graph_data/gql10d_ml_onpolicy_stats.csv', lambda conf: True ) if __name__ ==", "lambda conf: True ) input_folder = Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder,", "True ) if __name__ == '__main__': sims_analysis_BD() sims_analysis_GQL_BD() data =", "+ 'BD/to_graph_data/gql10d_all_data_ml.csv' all_trials.to_csv(output_file, header=True) data = merge_sim_files(lambda x: True, Paths.rest_path", "Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda conf: True ) def sims_analysis_GQL_BD(): input_folder", "Paths.rest_path + 'archive/beh/ql-ml-opt/ql-ml/' sims_analysis(input_folder, Paths.local_path + 'BD/to_graph_data/ql_ml_onpolicy_stats.csv', lambda conf: True", "+ 'archive/beh/gql-ml-opt/gql-ml/') all_trials = extract_run_rew(data) output_file = Paths.local_path + 'BD/to_graph_data/gql_all_data_ml.csv'", "'BD/to_graph_data/gql_ml_onpolicy_stats.csv', lambda conf: True ) input_folder = Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/'" ]
[ "- 0x{new_hash:08X}') code = code.replace( old_hash, f'0x{new_hash:08X}' ) with open('source/syscalls.c',", "code = code.replace( old_hash, f'0x{new_hash:08X}' ) with open('source/syscalls.c', 'w') as", "lambda v: ((v >> 8) & (2 ** 32 -", "for function_name, old_hash in matches: new_hash = get_function_hash(seed, function_name, is_syscall=False)", "range(len(name))] if len(s) == 2]: partial_name_short = struct.unpack('<H', segment.encode())[0] function_hash", "function_name, is_syscall=True): function_hash = seed function_name = function_name.replace('_', '') if", "SW2_SEED {old_seed}', f'#define SW2_SEED 0x{new_seed:08X}', 1 ) with open('include/syscalls.h', 'w')", "function_name[2:] name = function_name + '\\0' ror8 = lambda v:", "'w') as f: f.write(code) def get_function_hash(seed, function_name, is_syscall=True): function_hash =", "= f.read() for syscall_name in syscall_names: regex = re.compile(syscall_name +", "(0x[A-Fa-f0-9]{8})', re.DOTALL) match = re.search(regex, syscall_definitions) assert match is not", "main(): new_seed = random.randint(2 ** 28, 2 ** 32 -", ") with open('include/syscalls.h', 'w') as f: f.write(code) def get_function_hash(seed, function_name,", "code = f.read() for syscall_name in syscall_names: regex = re.compile(syscall_name", "syscall_names = set(syscall_names) syscall_definitions = code.split('#elif defined(__GNUC__)')[3] for syscall_name in", "matches: new_hash = get_function_hash(seed, function_name, is_syscall=False) code = code.replace( f'#define", "for syscall_name in syscall_names: regex = re.compile('NTSTATUS ' + syscall_name", "f.write(code) def main(): new_seed = random.randint(2 ** 28, 2 **", "with open(header_file, 'w') as f: f.write(code) def main(): new_seed =", "get_old_seed() replace_seed(old_seed, new_seed) replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed) if os.name == 'nt': print('done!", "found!' old_hash = match.group(1) new_hash = get_function_hash(seed, syscall_name) print(f'{syscall_name} ->", "== 'nt': print('done! recompile with:\\nnmake -f Makefile.msvc') else: print('done! 
recompile", "is_syscall=False) code = code.replace( f'#define {function_name}_SW2_HASH {old_hash}', f'#define {function_name}_SW2_HASH 0x{new_hash:08X}',", "| ((v << 24) & (2 ** 32 - 1))", "' + syscall_name + '\\\\(.*?\"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL) match =", "random import struct def get_old_seed(): with open('include/syscalls.h') as f: code", "regex = re.compile(r'__declspec\\(naked\\) NTSTATUS (Nt[^(]+)') syscall_names = re.findall(regex, code) syscall_names", "** 28, 2 ** 32 - 1) #new_seed = 0x1337c0de", "if len(s) == 2]: partial_name_short = struct.unpack('<H', segment.encode())[0] function_hash ^=", "as f: code = f.read() regex = re.compile(r'__declspec\\(naked\\) NTSTATUS (Nt[^(]+)')", "f'#define {function_name}_SW2_HASH {old_hash}', f'#define {function_name}_SW2_HASH 0x{new_hash:08X}', 1 ) if matches:", "= f.read() regex = re.compile(r'__declspec\\(naked\\) NTSTATUS (Nt[^(]+)') syscall_names = re.findall(regex,", "{old_seed}', f'#define SW2_SEED 0x{new_seed:08X}', 1 ) with open('include/syscalls.h', 'w') as", "get_function_hash(seed, function_name, is_syscall=True): function_hash = seed function_name = function_name.replace('_', '')", "import os import re import glob import random import struct", "f: f.write(code) def replace_dinvoke_hashes(seed): for header_file in glob.glob(\"include/**/*.h\", recursive=True): with", "assert match is not None, f'hash of syscall {syscall_name} not", "replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed) if os.name == 'nt': print('done! recompile with:\\nnmake -f", "get_function_hash(seed, syscall_name) code = code.replace( f'0{old_hash}h', f'0{new_hash:08X}h', 1 ) with", "f.read() regex = re.compile(r'#define (\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})') matches = re.findall(regex, code)", "'') if is_syscall and function_name[:2] == 'Nt': function_name = 'Zw'", "f: code = f.read() for syscall_name in syscall_names: regex =", "import struct def get_old_seed(): with open('include/syscalls.h') as f: code =", "coding: utf-8 -*- import os import re import glob import", "= re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code) assert match is not None,", "function_name, is_syscall=False) code = code.replace( f'#define {function_name}_SW2_HASH {old_hash}', f'#define {function_name}_SW2_HASH", "in [name[i:i + 2] for i in range(len(name))] if len(s)", "matches = re.findall(regex, code) for function_name, old_hash in matches: new_hash", "= re.findall(regex, code) syscall_names = set(syscall_names) syscall_definitions = code.split('#elif defined(__GNUC__)')[3]", "1 ) with open('include/syscalls.h', 'w') as f: f.write(code) def get_function_hash(seed,", "code = f.read() regex = re.compile(r'__declspec\\(naked\\) NTSTATUS (Nt[^(]+)') syscall_names =", "= get_function_hash(seed, function_name, is_syscall=False) code = code.replace( f'#define {function_name}_SW2_HASH {old_hash}',", "matches: with open(header_file, 'w') as f: f.write(code) def main(): new_seed", "& (2 ** 32 - 1)) | ((v << 24)", "{syscall_name} not found!' 
old_hash = match.group(1) new_hash = get_function_hash(seed, syscall_name)", "<< 24) & (2 ** 32 - 1)) for segment", "= f.read() regex = re.compile(r'#define (\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})') matches = re.findall(regex,", "f'#define SW2_SEED {old_seed}', f'#define SW2_SEED 0x{new_seed:08X}', 1 ) with open('include/syscalls.h',", "re.findall(regex, code) for function_name, old_hash in matches: new_hash = get_function_hash(seed,", "is_syscall=True): function_hash = seed function_name = function_name.replace('_', '') if is_syscall", "2]: partial_name_short = struct.unpack('<H', segment.encode())[0] function_hash ^= partial_name_short + ror8(function_hash)", "code = f.read() code = code.replace( f'#define SW2_SEED {old_seed}', f'#define", "print('done! recompile with:\\nmake -f Makefile.mingw') if __name__ == '__main__': main()", "f.read() for syscall_name in syscall_names: regex = re.compile(syscall_name + '", "0x1337c0de old_seed = get_old_seed() replace_seed(old_seed, new_seed) replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed) if os.name", "function_hash = seed function_name = function_name.replace('_', '') if is_syscall and", "SW2_SEED 0x{new_seed:08X}', 1 ) with open('include/syscalls.h', 'w') as f: f.write(code)", "match.group(1) new_hash = get_function_hash(seed, syscall_name) code = code.replace( f'0{old_hash}h', f'0{new_hash:08X}h',", "re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code) assert match is not None, 'SW2_SEED", "not found!' old_hash = match.group(1) new_hash = get_function_hash(seed, syscall_name) code", "import glob import random import struct def get_old_seed(): with open('include/syscalls.h')", "open('source/syscalls.c') as f: code = f.read() regex = re.compile(r'__declspec\\(naked\\) NTSTATUS", "'Zw' + function_name[2:] name = function_name + '\\0' ror8 =", "(\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})') matches = re.findall(regex, code) for function_name, old_hash in", "function_name[:2] == 'Nt': function_name = 'Zw' + function_name[2:] name =", "f: code = f.read() regex = re.compile(r'#define (\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})') matches", "syscall_definitions) assert match is not None, f'hash of syscall {syscall_name}", "f.write(code) def replace_dinvoke_hashes(seed): for header_file in glob.glob(\"include/**/*.h\", recursive=True): with open(header_file)", "re.compile(syscall_name + ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL) match = re.search(regex,", "code) syscall_names = set(syscall_names) syscall_definitions = code.split('#elif defined(__GNUC__)')[3] for syscall_name", "syscall_name) print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}') code = code.replace( old_hash,", "syscall_name in syscall_names: regex = re.compile(syscall_name + ' PROC.*?mov ecx,", "0([A-Fa-f0-9]{8})h', re.DOTALL) match = re.search(regex, code) assert match is not", "is_syscall and function_name[:2] == 'Nt': function_name = 'Zw' + function_name[2:]", "of syscall {syscall_name} not found!' 
old_hash = match.group(1) new_hash =", "syscall_name in syscall_names: regex = re.compile('NTSTATUS ' + syscall_name +", "code.replace( old_hash, f'0x{new_hash:08X}' ) with open('source/syscalls.c', 'w') as f: f.write(code)", "0x{new_hash:08X}') code = code.replace( old_hash, f'0x{new_hash:08X}' ) with open('source/syscalls.c', 'w')", "2 ** 32 - 1) #new_seed = 0x1337c0de old_seed =", "= random.randint(2 ** 28, 2 ** 32 - 1) #new_seed", "code = f.read() regex = re.compile(r'#define (\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})') matches =", "1 ) if matches: with open(header_file, 'w') as f: f.write(code)", "partial_name_short = struct.unpack('<H', segment.encode())[0] function_hash ^= partial_name_short + ror8(function_hash) return", "(2 ** 32 - 1)) for segment in [s for", "old_hash, f'0x{new_hash:08X}' ) with open('source/syscalls.c', 'w') as f: f.write(code) with", "code = code.replace( f'#define SW2_SEED {old_seed}', f'#define SW2_SEED 0x{new_seed:08X}', 1", "(Nt[^(]+)') syscall_names = re.findall(regex, code) syscall_names = set(syscall_names) syscall_definitions =", "new_hash = get_function_hash(seed, syscall_name) code = code.replace( f'0{old_hash}h', f'0{new_hash:08X}h', 1", "#new_seed = 0x1337c0de old_seed = get_old_seed() replace_seed(old_seed, new_seed) replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed)", "print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}') code = code.replace( old_hash, f'0x{new_hash:08X}'", "struct def get_old_seed(): with open('include/syscalls.h') as f: code = f.read()", "old_hash = match.group(1) new_hash = get_function_hash(seed, syscall_name) code = code.replace(", "= struct.unpack('<H', segment.encode())[0] function_hash ^= partial_name_short + ror8(function_hash) return function_hash", "is not None, 'SW2_SEED not found!' return match.group(1) def replace_seed(old_seed,", "syscall_names = re.findall(regex, code) syscall_names = set(syscall_names) syscall_definitions = code.split('#elif", "f'0{old_hash}h', f'0{new_hash:08X}h', 1 ) with open('source/syscalls-asm.asm', 'w') as f: f.write(code)", "f.write(code) def get_function_hash(seed, function_name, is_syscall=True): function_hash = seed function_name =", "== 2]: partial_name_short = struct.unpack('<H', segment.encode())[0] function_hash ^= partial_name_short +", "new_hash = get_function_hash(seed, function_name, is_syscall=False) code = code.replace( f'#define {function_name}_SW2_HASH", "= re.search(regex, code) assert match is not None, f'hash of", "f'hash of syscall {syscall_name} not found!' old_hash = match.group(1) new_hash", "- 1)) | ((v << 24) & (2 ** 32", "else: print('done! 
recompile with:\\nmake -f Makefile.mingw') if __name__ == '__main__':", "8) & (2 ** 32 - 1)) | ((v <<", "(0x[a-fA-F0-9]{8})') matches = re.findall(regex, code) for function_name, old_hash in matches:", "32 - 1)) for segment in [s for s in", "get_old_seed(): with open('include/syscalls.h') as f: code = f.read() match =", "for header_file in glob.glob(\"include/**/*.h\", recursive=True): with open(header_file) as f: code", "open(header_file) as f: code = f.read() regex = re.compile(r'#define (\\w+)_SW2_HASH", ") with open('source/syscalls.c', 'w') as f: f.write(code) with open('source/syscalls-asm.asm') as", "return function_hash def replace_syscall_hashes(seed): with open('source/syscalls.c') as f: code =", "new_hash = get_function_hash(seed, syscall_name) print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}') code", "in range(len(name))] if len(s) == 2]: partial_name_short = struct.unpack('<H', segment.encode())[0]", "^= partial_name_short + ror8(function_hash) return function_hash def replace_syscall_hashes(seed): with open('source/syscalls.c')", "2] for i in range(len(name))] if len(s) == 2]: partial_name_short", "'w') as f: f.write(code) def main(): new_seed = random.randint(2 **", "new_seed = random.randint(2 ** 28, 2 ** 32 - 1)", "f.read() code = code.replace( f'#define SW2_SEED {old_seed}', f'#define SW2_SEED 0x{new_seed:08X}',", "as f: f.write(code) def main(): new_seed = random.randint(2 ** 28,", "with:\\nnmake -f Makefile.msvc') else: print('done! recompile with:\\nmake -f Makefile.mingw') if", "as f: f.write(code) def replace_dinvoke_hashes(seed): for header_file in glob.glob(\"include/**/*.h\", recursive=True):", "in glob.glob(\"include/**/*.h\", recursive=True): with open(header_file) as f: code = f.read()", "random.randint(2 ** 28, 2 ** 32 - 1) #new_seed =", "code.replace( f'#define {function_name}_SW2_HASH {old_hash}', f'#define {function_name}_SW2_HASH 0x{new_hash:08X}', 1 ) if", "partial_name_short + ror8(function_hash) return function_hash def replace_syscall_hashes(seed): with open('source/syscalls.c') as", "+ ror8(function_hash) return function_hash def replace_syscall_hashes(seed): with open('source/syscalls.c') as f:", "syscall {syscall_name} not found!' 
old_hash = match.group(1) new_hash = get_function_hash(seed,", "old_seed = get_old_seed() replace_seed(old_seed, new_seed) replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed) if os.name ==", "replace_syscall_hashes(seed): with open('source/syscalls.c') as f: code = f.read() regex =", "= re.search(regex, syscall_definitions) assert match is not None, f'hash of", "for segment in [s for s in [name[i:i + 2]", "match = re.search(regex, syscall_definitions) assert match is not None, f'hash", "= get_function_hash(seed, syscall_name) print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}') code =", "code = f.read() match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code) assert", "** 32 - 1)) for segment in [s for s", "re.compile('NTSTATUS ' + syscall_name + '\\\\(.*?\"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL) match", "and function_name[:2] == 'Nt': function_name = 'Zw' + function_name[2:] name", "32 - 1)) | ((v << 24) & (2 **", "regex = re.compile('NTSTATUS ' + syscall_name + '\\\\(.*?\"mov ecx, (0x[A-Fa-f0-9]{8})',", "if matches: with open(header_file, 'w') as f: f.write(code) def main():", "'Nt': function_name = 'Zw' + function_name[2:] name = function_name +", "-> {old_hash} - 0x{new_hash:08X}') code = code.replace( old_hash, f'0x{new_hash:08X}' )", "= lambda v: ((v >> 8) & (2 ** 32", "= code.split('#elif defined(__GNUC__)')[3] for syscall_name in syscall_names: regex = re.compile('NTSTATUS", "with open('source/syscalls.c', 'w') as f: f.write(code) with open('source/syscalls-asm.asm') as f:", "glob.glob(\"include/**/*.h\", recursive=True): with open(header_file) as f: code = f.read() regex", "' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL) match = re.search(regex, code) assert", "0x{new_seed:08X}', 1 ) with open('include/syscalls.h', 'w') as f: f.write(code) def", "match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code) assert match is not", "code) for function_name, old_hash in matches: new_hash = get_function_hash(seed, function_name,", "len(s) == 2]: partial_name_short = struct.unpack('<H', segment.encode())[0] function_hash ^= partial_name_short", "re.findall(regex, code) syscall_names = set(syscall_names) syscall_definitions = code.split('#elif defined(__GNUC__)')[3] for", "as f: f.write(code) with open('source/syscalls-asm.asm') as f: code = f.read()", "f: f.write(code) def main(): new_seed = random.randint(2 ** 28, 2", "assert match is not None, 'SW2_SEED not found!' return match.group(1)", "{function_name}_SW2_HASH {old_hash}', f'#define {function_name}_SW2_HASH 0x{new_hash:08X}', 1 ) if matches: with", "re.DOTALL) match = re.search(regex, code) assert match is not None,", "function_name.replace('_', '') if is_syscall and function_name[:2] == 'Nt': function_name =", "-f Makefile.msvc') else: print('done! 
recompile with:\\nmake -f Makefile.mingw') if __name__", "f.write(code) with open('source/syscalls-asm.asm') as f: code = f.read() for syscall_name", "= set(syscall_names) syscall_definitions = code.split('#elif defined(__GNUC__)')[3] for syscall_name in syscall_names:", "function_hash ^= partial_name_short + ror8(function_hash) return function_hash def replace_syscall_hashes(seed): with", "code.replace( f'#define SW2_SEED {old_seed}', f'#define SW2_SEED 0x{new_seed:08X}', 1 ) with", "new_seed): with open('include/syscalls.h') as f: code = f.read() code =", "= code.replace( f'#define SW2_SEED {old_seed}', f'#define SW2_SEED 0x{new_seed:08X}', 1 )", "seed function_name = function_name.replace('_', '') if is_syscall and function_name[:2] ==", "1)) | ((v << 24) & (2 ** 32 -", "= f.read() match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code) assert match", "= get_old_seed() replace_seed(old_seed, new_seed) replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed) if os.name == 'nt':", "in syscall_names: regex = re.compile(syscall_name + ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h',", "open('include/syscalls.h') as f: code = f.read() match = re.search(r'#define SW2_SEED", "print('done! recompile with:\\nnmake -f Makefile.msvc') else: print('done! recompile with:\\nmake -f", "not found!' return match.group(1) def replace_seed(old_seed, new_seed): with open('include/syscalls.h') as", "= 0x1337c0de old_seed = get_old_seed() replace_seed(old_seed, new_seed) replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed) if", "1)) for segment in [s for s in [name[i:i +", "syscall_names: regex = re.compile(syscall_name + ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL)", "old_hash = match.group(1) new_hash = get_function_hash(seed, syscall_name) print(f'{syscall_name} -> {old_hash}", "-*- coding: utf-8 -*- import os import re import glob", "code) assert match is not None, 'SW2_SEED not found!' return", "code = code.replace( f'0{old_hash}h', f'0{new_hash:08X}h', 1 ) with open('source/syscalls-asm.asm', 'w')", "'nt': print('done! recompile with:\\nnmake -f Makefile.msvc') else: print('done! recompile with:\\nmake", "SW2_SEED (0x[a-fA-F0-9]{8})', code) assert match is not None, 'SW2_SEED not", "in [s for s in [name[i:i + 2] for i", "open('source/syscalls-asm.asm') as f: code = f.read() for syscall_name in syscall_names:", "old_hash in matches: new_hash = get_function_hash(seed, function_name, is_syscall=False) code =", "((v >> 8) & (2 ** 32 - 1)) |", "not found!' old_hash = match.group(1) new_hash = get_function_hash(seed, syscall_name) print(f'{syscall_name}", "set(syscall_names) syscall_definitions = code.split('#elif defined(__GNUC__)')[3] for syscall_name in syscall_names: regex", "new_seed) replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed) if os.name == 'nt': print('done! recompile with:\\nnmake", "found!' 
old_hash = match.group(1) new_hash = get_function_hash(seed, syscall_name) code =", "return match.group(1) def replace_seed(old_seed, new_seed): with open('include/syscalls.h') as f: code", "= code.replace( f'0{old_hash}h', f'0{new_hash:08X}h', 1 ) with open('source/syscalls-asm.asm', 'w') as", "with open('include/syscalls.h', 'w') as f: f.write(code) def get_function_hash(seed, function_name, is_syscall=True):", "& (2 ** 32 - 1)) for segment in [s", "open(header_file, 'w') as f: f.write(code) def main(): new_seed = random.randint(2", "syscall_definitions = code.split('#elif defined(__GNUC__)')[3] for syscall_name in syscall_names: regex =", "with open('source/syscalls-asm.asm', 'w') as f: f.write(code) def replace_dinvoke_hashes(seed): for header_file", "function_name = 'Zw' + function_name[2:] name = function_name + '\\0'", "1 ) with open('source/syscalls-asm.asm', 'w') as f: f.write(code) def replace_dinvoke_hashes(seed):", "replace_seed(old_seed, new_seed) replace_syscall_hashes(new_seed) replace_dinvoke_hashes(new_seed) if os.name == 'nt': print('done! recompile", "header_file in glob.glob(\"include/**/*.h\", recursive=True): with open(header_file) as f: code =", "glob import random import struct def get_old_seed(): with open('include/syscalls.h') as", "{old_hash}', f'#define {function_name}_SW2_HASH 0x{new_hash:08X}', 1 ) if matches: with open(header_file,", ") with open('source/syscalls-asm.asm', 'w') as f: f.write(code) def replace_dinvoke_hashes(seed): for", "with open('include/syscalls.h') as f: code = f.read() code = code.replace(", "open('include/syscalls.h', 'w') as f: f.write(code) def get_function_hash(seed, function_name, is_syscall=True): function_hash", "function_name, old_hash in matches: new_hash = get_function_hash(seed, function_name, is_syscall=False) code", "re.compile(r'#define (\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})') matches = re.findall(regex, code) for function_name, old_hash", "NTSTATUS (Nt[^(]+)') syscall_names = re.findall(regex, code) syscall_names = set(syscall_names) syscall_definitions", "** 32 - 1)) | ((v << 24) & (2", "def replace_seed(old_seed, new_seed): with open('include/syscalls.h') as f: code = f.read()", "= code.replace( old_hash, f'0x{new_hash:08X}' ) with open('source/syscalls.c', 'w') as f:", "= code.replace( f'#define {function_name}_SW2_HASH {old_hash}', f'#define {function_name}_SW2_HASH 0x{new_hash:08X}', 1 )", "struct.unpack('<H', segment.encode())[0] function_hash ^= partial_name_short + ror8(function_hash) return function_hash def", "- 1) #new_seed = 0x1337c0de old_seed = get_old_seed() replace_seed(old_seed, new_seed)", "function_hash def replace_syscall_hashes(seed): with open('source/syscalls.c') as f: code = f.read()", "1) #new_seed = 0x1337c0de old_seed = get_old_seed() replace_seed(old_seed, new_seed) replace_syscall_hashes(new_seed)", "None, 'SW2_SEED not found!' return match.group(1) def replace_seed(old_seed, new_seed): with", "as f: code = f.read() regex = re.compile(r'#define (\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})')", "get_function_hash(seed, function_name, is_syscall=False) code = code.replace( f'#define {function_name}_SW2_HASH {old_hash}', f'#define", "in syscall_names: regex = re.compile('NTSTATUS ' + syscall_name + '\\\\(.*?\"mov", "ror8(function_hash) return function_hash def replace_syscall_hashes(seed): with open('source/syscalls.c') as f: code", "not None, f'hash of syscall {syscall_name} not found!' 
old_hash =", "with open('source/syscalls-asm.asm') as f: code = f.read() for syscall_name in", "'w') as f: f.write(code) def replace_dinvoke_hashes(seed): for header_file in glob.glob(\"include/**/*.h\",", "PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL) match = re.search(regex, code) assert match", "replace_seed(old_seed, new_seed): with open('include/syscalls.h') as f: code = f.read() code", "(2 ** 32 - 1)) | ((v << 24) &", "replace_dinvoke_hashes(seed): for header_file in glob.glob(\"include/**/*.h\", recursive=True): with open(header_file) as f:", "ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL) match = re.search(regex, syscall_definitions) assert match is", "= re.compile(r'#define (\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})') matches = re.findall(regex, code) for function_name,", "in matches: new_hash = get_function_hash(seed, function_name, is_syscall=False) code = code.replace(", "open('source/syscalls-asm.asm', 'w') as f: f.write(code) def replace_dinvoke_hashes(seed): for header_file in", "f.read() match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code) assert match is", "v: ((v >> 8) & (2 ** 32 - 1))", "'w') as f: f.write(code) with open('source/syscalls-asm.asm') as f: code =", "+ ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL) match = re.search(regex, code)", "code.split('#elif defined(__GNUC__)')[3] for syscall_name in syscall_names: regex = re.compile('NTSTATUS '", "= match.group(1) new_hash = get_function_hash(seed, syscall_name) code = code.replace( f'0{old_hash}h',", "with open(header_file) as f: code = f.read() regex = re.compile(r'#define", "f'#define {function_name}_SW2_HASH 0x{new_hash:08X}', 1 ) if matches: with open(header_file, 'w')", "f'#define SW2_SEED 0x{new_seed:08X}', 1 ) with open('include/syscalls.h', 'w') as f:", "match.group(1) new_hash = get_function_hash(seed, syscall_name) print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}')", "[name[i:i + 2] for i in range(len(name))] if len(s) ==", "+ '\\0' ror8 = lambda v: ((v >> 8) &", "recursive=True): with open(header_file) as f: code = f.read() regex =", "f: code = f.read() match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})', code)", "f'0x{new_hash:08X}' ) with open('source/syscalls.c', 'w') as f: f.write(code) with open('source/syscalls-asm.asm')", "** 32 - 1) #new_seed = 0x1337c0de old_seed = get_old_seed()", "function_name = function_name.replace('_', '') if is_syscall and function_name[:2] == 'Nt':", "match is not None, f'hash of syscall {syscall_name} not found!'", "+ '\\\\(.*?\"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL) match = re.search(regex, syscall_definitions) assert", "code.replace( f'0{old_hash}h', f'0{new_hash:08X}h', 1 ) with open('source/syscalls-asm.asm', 'w') as f:", "code) assert match is not None, f'hash of syscall {syscall_name}", "i in range(len(name))] if len(s) == 2]: partial_name_short = struct.unpack('<H',", "32 - 1) #new_seed = 0x1337c0de old_seed = get_old_seed() replace_seed(old_seed,", "'SW2_SEED not found!' 
return match.group(1) def replace_seed(old_seed, new_seed): with open('include/syscalls.h')", "= function_name.replace('_', '') if is_syscall and function_name[:2] == 'Nt': function_name", "-*- import os import re import glob import random import", "f: f.write(code) def get_function_hash(seed, function_name, is_syscall=True): function_hash = seed function_name", "re.compile(r'__declspec\\(naked\\) NTSTATUS (Nt[^(]+)') syscall_names = re.findall(regex, code) syscall_names = set(syscall_names)", "= re.findall(regex, code) for function_name, old_hash in matches: new_hash =", "ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL) match = re.search(regex, code) assert match is", "open('include/syscalls.h') as f: code = f.read() code = code.replace( f'#define", "segment.encode())[0] function_hash ^= partial_name_short + ror8(function_hash) return function_hash def replace_syscall_hashes(seed):", "as f: code = f.read() for syscall_name in syscall_names: regex", "def replace_dinvoke_hashes(seed): for header_file in glob.glob(\"include/**/*.h\", recursive=True): with open(header_file) as", "code = code.replace( f'#define {function_name}_SW2_HASH {old_hash}', f'#define {function_name}_SW2_HASH 0x{new_hash:08X}', 1", ">> 8) & (2 ** 32 - 1)) | ((v", "f: code = f.read() code = code.replace( f'#define SW2_SEED {old_seed}',", "ror8 = lambda v: ((v >> 8) & (2 **", "= re.compile(syscall_name + ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL) match =", "def main(): new_seed = random.randint(2 ** 28, 2 ** 32", "as f: f.write(code) def get_function_hash(seed, function_name, is_syscall=True): function_hash = seed", "python3 # -*- coding: utf-8 -*- import os import re", "re.search(regex, code) assert match is not None, f'hash of syscall", "recompile with:\\nnmake -f Makefile.msvc') else: print('done! recompile with:\\nmake -f Makefile.mingw')", "match = re.search(regex, code) assert match is not None, f'hash", "f: code = f.read() regex = re.compile(r'__declspec\\(naked\\) NTSTATUS (Nt[^(]+)') syscall_names", "function_name + '\\0' ror8 = lambda v: ((v >> 8)", "with open('source/syscalls.c') as f: code = f.read() regex = re.compile(r'__declspec\\(naked\\)", "syscall_names: regex = re.compile('NTSTATUS ' + syscall_name + '\\\\(.*?\"mov ecx,", "os import re import glob import random import struct def", "not None, 'SW2_SEED not found!' return match.group(1) def replace_seed(old_seed, new_seed):", "segment in [s for s in [name[i:i + 2] for", "syscall_name) code = code.replace( f'0{old_hash}h', f'0{new_hash:08X}h', 1 ) with open('source/syscalls-asm.asm',", "Makefile.msvc') else: print('done! recompile with:\\nmake -f Makefile.mingw') if __name__ ==", "open('source/syscalls.c', 'w') as f: f.write(code) with open('source/syscalls-asm.asm') as f: code", "name = function_name + '\\0' ror8 = lambda v: ((v", ") if matches: with open(header_file, 'w') as f: f.write(code) def", "match.group(1) def replace_seed(old_seed, new_seed): with open('include/syscalls.h') as f: code =", "replace_dinvoke_hashes(new_seed) if os.name == 'nt': print('done! 
recompile with:\\nnmake -f Makefile.msvc')", "syscall_name + '\\\\(.*?\"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL) match = re.search(regex, syscall_definitions)", "24) & (2 ** 32 - 1)) for segment in", "as f: code = f.read() code = code.replace( f'#define SW2_SEED", "[s for s in [name[i:i + 2] for i in", "+ 2] for i in range(len(name))] if len(s) == 2]:", "f: f.write(code) with open('source/syscalls-asm.asm') as f: code = f.read() for", "{old_hash} - 0x{new_hash:08X}') code = code.replace( old_hash, f'0x{new_hash:08X}' ) with", "if os.name == 'nt': print('done! recompile with:\\nnmake -f Makefile.msvc') else:", "def get_function_hash(seed, function_name, is_syscall=True): function_hash = seed function_name = function_name.replace('_',", "= get_function_hash(seed, syscall_name) code = code.replace( f'0{old_hash}h', f'0{new_hash:08X}h', 1 )", "def replace_syscall_hashes(seed): with open('source/syscalls.c') as f: code = f.read() regex", "s in [name[i:i + 2] for i in range(len(name))] if", "match is not None, 'SW2_SEED not found!' return match.group(1) def", "= function_name + '\\0' ror8 = lambda v: ((v >>", "= f.read() code = code.replace( f'#define SW2_SEED {old_seed}', f'#define SW2_SEED", "with open('include/syscalls.h') as f: code = f.read() match = re.search(r'#define", "= 'Zw' + function_name[2:] name = function_name + '\\0' ror8", "{function_name}_SW2_HASH 0x{new_hash:08X}', 1 ) if matches: with open(header_file, 'w') as", "for i in range(len(name))] if len(s) == 2]: partial_name_short =", "if is_syscall and function_name[:2] == 'Nt': function_name = 'Zw' +", "+ syscall_name + '\\\\(.*?\"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL) match = re.search(regex,", "f'0{new_hash:08X}h', 1 ) with open('source/syscalls-asm.asm', 'w') as f: f.write(code) def", "for s in [name[i:i + 2] for i in range(len(name))]", "(0x[a-fA-F0-9]{8})', code) assert match is not None, 'SW2_SEED not found!'", "is not None, f'hash of syscall {syscall_name} not found!' old_hash", "f.read() regex = re.compile(r'__declspec\\(naked\\) NTSTATUS (Nt[^(]+)') syscall_names = re.findall(regex, code)", "found!' return match.group(1) def replace_seed(old_seed, new_seed): with open('include/syscalls.h') as f:", "# -*- coding: utf-8 -*- import os import re import", "utf-8 -*- import os import re import glob import random", "as f: code = f.read() match = re.search(r'#define SW2_SEED (0x[a-fA-F0-9]{8})',", "+ function_name[2:] name = function_name + '\\0' ror8 = lambda", "'\\\\(.*?\"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL) match = re.search(regex, syscall_definitions) assert match", "defined(__GNUC__)')[3] for syscall_name in syscall_names: regex = re.compile('NTSTATUS ' +", "== 'Nt': function_name = 'Zw' + function_name[2:] name = function_name", "= re.compile(r'__declspec\\(naked\\) NTSTATUS (Nt[^(]+)') syscall_names = re.findall(regex, code) syscall_names =", "- 1)) for segment in [s for s in [name[i:i", "def get_old_seed(): with open('include/syscalls.h') as f: code = f.read() match", "= re.compile('NTSTATUS ' + syscall_name + '\\\\(.*?\"mov ecx, (0x[A-Fa-f0-9]{8})', re.DOTALL)", "re.DOTALL) match = re.search(regex, syscall_definitions) assert match is not None,", "= seed function_name = function_name.replace('_', '') if is_syscall and function_name[:2]", "get_function_hash(seed, syscall_name) print(f'{syscall_name} -> {old_hash} - 0x{new_hash:08X}') code = code.replace(", "((v << 24) & (2 ** 32 - 1)) for", "os.name == 'nt': print('done! 
recompile with:\\nnmake -f Makefile.msvc') else: print('done!", "for syscall_name in syscall_names: regex = re.compile(syscall_name + ' PROC.*?mov", "None, f'hash of syscall {syscall_name} not found!' old_hash = match.group(1)", "'\\0' ror8 = lambda v: ((v >> 8) & (2", "28, 2 ** 32 - 1) #new_seed = 0x1337c0de old_seed", "regex = re.compile(r'#define (\\w+)_SW2_HASH (0x[a-fA-F0-9]{8})') matches = re.findall(regex, code) for", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import", "re.search(regex, syscall_definitions) assert match is not None, f'hash of syscall", "regex = re.compile(syscall_name + ' PROC.*?mov ecx, 0([A-Fa-f0-9]{8})h', re.DOTALL) match", "0x{new_hash:08X}', 1 ) if matches: with open(header_file, 'w') as f:", "import re import glob import random import struct def get_old_seed():", "re import glob import random import struct def get_old_seed(): with", "import random import struct def get_old_seed(): with open('include/syscalls.h') as f:", "= match.group(1) new_hash = get_function_hash(seed, syscall_name) print(f'{syscall_name} -> {old_hash} -" ]
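The fragments above shingle a SysWhispers2-style seed/hash rotation script. Reassembled into one runnable unit from the overlapping pieces (a sketch only, not checked against the full original file; the example seed below is the commented-out 0x1337c0de constant that appears in the fragments, and "NtCreateFile" is just an illustrative syscall name):

import struct

def get_function_hash(seed, function_name, is_syscall=True):
    # Position-dependent hash of a (Zw-normalized) syscall name, as in the fragments.
    function_hash = seed
    function_name = function_name.replace('_', '')
    if is_syscall and function_name[:2] == 'Nt':
        function_name = 'Zw' + function_name[2:]
    name = function_name + '\0'
    # 32-bit rotate right by 8 bits
    ror8 = lambda v: ((v >> 8) & (2 ** 32 - 1)) | ((v << 24) & (2 ** 32 - 1))
    # walk the name in overlapping 2-byte windows and fold each 16-bit chunk into the hash
    for segment in [s for s in [name[i:i + 2] for i in range(len(name))] if len(s) == 2]:
        partial_name_short = struct.unpack('<H', segment.encode())[0]
        function_hash ^= partial_name_short + ror8(function_hash)
    return function_hash

# example call (seed value from the fragments, syscall name chosen for illustration):
# print(f'0x{get_function_hash(0x1337c0de, "NtCreateFile"):08X}')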
[ "= \"Exit code ({}): {}\".format(abs(response.returncode), errmsg) return response.returncode, stderr def", "Error\" ) response.stdout = \"\" if not response.stdout else str(response.stdout)", "{}\".format(abs(response.returncode), errmsg) return response.returncode, stderr def execute(cmd, workdir=None, timeout=60): cmd", "from codes import exitcodes def _error_decode(response): stderr = \"\" if", "= response.stderr stderr = \"Exit code ({}): {}\".format(abs(response.returncode), errmsg) return", "returncode=124, stderr=\"Timeout\" ) except: response = CompletedProcess( args=cmd, returncode=-1, stderr=\"Internal", "codes import exitcodes def _error_decode(response): stderr = \"\" if response.returncode:", "universal_newlines=True, ) except TimeoutExpired: response = CompletedProcess( args=cmd, returncode=124, stderr=\"Timeout\"", "stderr=\"Internal Checker Error\" ) response.stdout = \"\" if not response.stdout", "TimeoutExpired: response = CompletedProcess( args=cmd, returncode=124, stderr=\"Timeout\" ) except: response", "cwd=workdir, timeout=timeout, universal_newlines=True, ) except TimeoutExpired: response = CompletedProcess( args=cmd,", ") except TimeoutExpired: response = CompletedProcess( args=cmd, returncode=124, stderr=\"Timeout\" )", "stderr=\"Timeout\" ) except: response = CompletedProcess( args=cmd, returncode=-1, stderr=\"Internal Checker", "stderr = \"\" if response.returncode: if response.returncode < 0: errmsg", "except: response = CompletedProcess( args=cmd, returncode=-1, stderr=\"Internal Checker Error\" )", "import run, PIPE, TimeoutExpired, CompletedProcess from codes import exitcodes def", "response.stdout = \"\" if not response.stdout else str(response.stdout) response.returncode, response.stderr", "\"\" if not response.stdout else str(response.stdout) response.returncode, response.stderr = _error_decode(response)", "response.returncode, stderr def execute(cmd, workdir=None, timeout=60): cmd = [\"/bin/bash\", \"-c\",", "errmsg) return response.returncode, stderr def execute(cmd, workdir=None, timeout=60): cmd =", "else: errmsg = response.stderr stderr = \"Exit code ({}): {}\".format(abs(response.returncode),", "if isinstance(errmsg, dict): errmsg = errmsg[\"descr\"] else: errmsg = response.stderr", "exitcodes def _error_decode(response): stderr = \"\" if response.returncode: if response.returncode", "code ({}): {}\".format(abs(response.returncode), errmsg) return response.returncode, stderr def execute(cmd, workdir=None,", "exitcodes.get(abs(response.returncode), \"Unknown Error\") if isinstance(errmsg, dict): errmsg = errmsg[\"descr\"] else:", "workdir=None, timeout=60): cmd = [\"/bin/bash\", \"-c\", cmd] try: response =", "<filename>checker/checker/executer.py from subprocess import run, PIPE, TimeoutExpired, CompletedProcess from codes", "response = run( cmd, stderr=PIPE, stdout=PIPE, cwd=workdir, timeout=timeout, universal_newlines=True, )", "if response.returncode < 0: errmsg = exitcodes.get(abs(response.returncode), \"Unknown Error\") if", "= run( cmd, stderr=PIPE, stdout=PIPE, cwd=workdir, timeout=timeout, universal_newlines=True, ) except", "run( cmd, stderr=PIPE, stdout=PIPE, cwd=workdir, timeout=timeout, universal_newlines=True, ) except TimeoutExpired:", "args=cmd, returncode=124, stderr=\"Timeout\" ) except: response = CompletedProcess( args=cmd, returncode=-1,", "= \"\" if not response.stdout else str(response.stdout) response.returncode, response.stderr =", "def _error_decode(response): stderr = \"\" if response.returncode: if response.returncode 
<", ") except: response = CompletedProcess( args=cmd, returncode=-1, stderr=\"Internal Checker Error\"", "stderr def execute(cmd, workdir=None, timeout=60): cmd = [\"/bin/bash\", \"-c\", cmd]", "response = CompletedProcess( args=cmd, returncode=-1, stderr=\"Internal Checker Error\" ) response.stdout", "response.stderr stderr = \"Exit code ({}): {}\".format(abs(response.returncode), errmsg) return response.returncode,", "stderr = \"Exit code ({}): {}\".format(abs(response.returncode), errmsg) return response.returncode, stderr", "cmd = [\"/bin/bash\", \"-c\", cmd] try: response = run( cmd,", "return response.returncode, stderr def execute(cmd, workdir=None, timeout=60): cmd = [\"/bin/bash\",", "\"\" if response.returncode: if response.returncode < 0: errmsg = exitcodes.get(abs(response.returncode),", "\"Exit code ({}): {}\".format(abs(response.returncode), errmsg) return response.returncode, stderr def execute(cmd,", "cmd, stderr=PIPE, stdout=PIPE, cwd=workdir, timeout=timeout, universal_newlines=True, ) except TimeoutExpired: response", "= CompletedProcess( args=cmd, returncode=124, stderr=\"Timeout\" ) except: response = CompletedProcess(", "if response.returncode: if response.returncode < 0: errmsg = exitcodes.get(abs(response.returncode), \"Unknown", "Checker Error\" ) response.stdout = \"\" if not response.stdout else", "= errmsg[\"descr\"] else: errmsg = response.stderr stderr = \"Exit code", "errmsg = exitcodes.get(abs(response.returncode), \"Unknown Error\") if isinstance(errmsg, dict): errmsg =", "subprocess import run, PIPE, TimeoutExpired, CompletedProcess from codes import exitcodes", "cmd] try: response = run( cmd, stderr=PIPE, stdout=PIPE, cwd=workdir, timeout=timeout,", "= \"\" if response.returncode: if response.returncode < 0: errmsg =", "execute(cmd, workdir=None, timeout=60): cmd = [\"/bin/bash\", \"-c\", cmd] try: response", "response = CompletedProcess( args=cmd, returncode=124, stderr=\"Timeout\" ) except: response =", "TimeoutExpired, CompletedProcess from codes import exitcodes def _error_decode(response): stderr =", "timeout=timeout, universal_newlines=True, ) except TimeoutExpired: response = CompletedProcess( args=cmd, returncode=124,", "args=cmd, returncode=-1, stderr=\"Internal Checker Error\" ) response.stdout = \"\" if", "response.returncode < 0: errmsg = exitcodes.get(abs(response.returncode), \"Unknown Error\") if isinstance(errmsg,", "\"Unknown Error\") if isinstance(errmsg, dict): errmsg = errmsg[\"descr\"] else: errmsg", "run, PIPE, TimeoutExpired, CompletedProcess from codes import exitcodes def _error_decode(response):", "Error\") if isinstance(errmsg, dict): errmsg = errmsg[\"descr\"] else: errmsg =", "\"-c\", cmd] try: response = run( cmd, stderr=PIPE, stdout=PIPE, cwd=workdir,", "import exitcodes def _error_decode(response): stderr = \"\" if response.returncode: if", "= exitcodes.get(abs(response.returncode), \"Unknown Error\") if isinstance(errmsg, dict): errmsg = errmsg[\"descr\"]", "[\"/bin/bash\", \"-c\", cmd] try: response = run( cmd, stderr=PIPE, stdout=PIPE,", "CompletedProcess( args=cmd, returncode=-1, stderr=\"Internal Checker Error\" ) response.stdout = \"\"", "< 0: errmsg = exitcodes.get(abs(response.returncode), \"Unknown Error\") if isinstance(errmsg, dict):", "PIPE, TimeoutExpired, CompletedProcess from codes import exitcodes def _error_decode(response): stderr", "timeout=60): cmd = [\"/bin/bash\", \"-c\", cmd] try: response = run(", ") response.stdout = \"\" if not response.stdout else str(response.stdout) response.returncode,", 
"errmsg[\"descr\"] else: errmsg = response.stderr stderr = \"Exit code ({}):", "try: response = run( cmd, stderr=PIPE, stdout=PIPE, cwd=workdir, timeout=timeout, universal_newlines=True,", "CompletedProcess( args=cmd, returncode=124, stderr=\"Timeout\" ) except: response = CompletedProcess( args=cmd,", "= [\"/bin/bash\", \"-c\", cmd] try: response = run( cmd, stderr=PIPE,", "errmsg = response.stderr stderr = \"Exit code ({}): {}\".format(abs(response.returncode), errmsg)", "isinstance(errmsg, dict): errmsg = errmsg[\"descr\"] else: errmsg = response.stderr stderr", "dict): errmsg = errmsg[\"descr\"] else: errmsg = response.stderr stderr =", "if not response.stdout else str(response.stdout) response.returncode, response.stderr = _error_decode(response) return", "def execute(cmd, workdir=None, timeout=60): cmd = [\"/bin/bash\", \"-c\", cmd] try:", "= CompletedProcess( args=cmd, returncode=-1, stderr=\"Internal Checker Error\" ) response.stdout =", "except TimeoutExpired: response = CompletedProcess( args=cmd, returncode=124, stderr=\"Timeout\" ) except:", "not response.stdout else str(response.stdout) response.returncode, response.stderr = _error_decode(response) return response", "from subprocess import run, PIPE, TimeoutExpired, CompletedProcess from codes import", "response.returncode: if response.returncode < 0: errmsg = exitcodes.get(abs(response.returncode), \"Unknown Error\")", "stdout=PIPE, cwd=workdir, timeout=timeout, universal_newlines=True, ) except TimeoutExpired: response = CompletedProcess(", "CompletedProcess from codes import exitcodes def _error_decode(response): stderr = \"\"", "_error_decode(response): stderr = \"\" if response.returncode: if response.returncode < 0:", "stderr=PIPE, stdout=PIPE, cwd=workdir, timeout=timeout, universal_newlines=True, ) except TimeoutExpired: response =", "0: errmsg = exitcodes.get(abs(response.returncode), \"Unknown Error\") if isinstance(errmsg, dict): errmsg", "({}): {}\".format(abs(response.returncode), errmsg) return response.returncode, stderr def execute(cmd, workdir=None, timeout=60):", "errmsg = errmsg[\"descr\"] else: errmsg = response.stderr stderr = \"Exit", "returncode=-1, stderr=\"Internal Checker Error\" ) response.stdout = \"\" if not" ]
[ "from pfm.util.log import logger class UpdateCommand(BaseCommand): def __init__(self, name, forward_type,", "remote_host, remote_port, local_port, ssh_server, server_port, login_user, config): super(UpdateCommand, self).__init__(config) self.name", "self.name in targets: target = targets[self.name] self.update(target) else: logger.warn(\"Port forward", "target = targets[self.name] self.update(target) else: logger.warn(\"Port forward setting named \"", "if self.remote_host is not None: target[\"remote_host\"] = self.remote_host if self.remote_port", "targets = json.load(f) if self.name in targets: target = targets[self.name]", "= forward_type self.remote_host = remote_host self.remote_port = remote_port self.local_port =", "self.remote_port = remote_port self.local_port = local_port self.ssh_server = ssh_server self.server_port", "self.remote_host if self.remote_port is not None: target[\"remote_port\"] = self.remote_port if", "if self.local_port is not None: target[\"local_port\"] = self.local_port if self.ssh_server", "run(self): f = open(self.config_path, 'r') targets = json.load(f) if self.name", "super(UpdateCommand, self).__init__(config) self.name = name self.forward_type = forward_type self.remote_host =", "None: target[\"remote_port\"] = self.remote_port if self.local_port is not None: target[\"local_port\"]", "None: target[\"server_port\"] = self.server_port if self.login_user is not None: target[\"login_user\"]", "= remote_port self.local_port = local_port self.ssh_server = ssh_server self.server_port =", "None: target[\"type\"] = self.forward_type if self.remote_host is not None: target[\"remote_host\"]", "target[\"remote_host\"] = self.remote_host if self.remote_port is not None: target[\"remote_port\"] =", "targets[self.name] self.update(target) else: logger.warn(\"Port forward setting named \" + self.name", "not None: target[\"remote_host\"] = self.remote_host if self.remote_port is not None:", "open(self.config_path, 'r') targets = json.load(f) if self.name in targets: target", "target[\"ssh_server\"] = self.ssh_server if self.server_port is not None: target[\"server_port\"] =", "if self.ssh_server is not None: target[\"ssh_server\"] = self.ssh_server if self.server_port", "self.remote_host = remote_host self.remote_port = remote_port self.local_port = local_port self.ssh_server", "self.forward_type if self.remote_host is not None: target[\"remote_host\"] = self.remote_host if", "self.login_user = login_user def run(self): f = open(self.config_path, 'r') targets", "in targets: target = targets[self.name] self.update(target) else: logger.warn(\"Port forward setting", "= self.remote_host if self.remote_port is not None: target[\"remote_port\"] = self.remote_port", "= self.forward_type if self.remote_host is not None: target[\"remote_host\"] = self.remote_host", "+ \"is not registered\") # write the target f =", "logger class UpdateCommand(BaseCommand): def __init__(self, name, forward_type, remote_host, remote_port, local_port,", "ssh_server self.server_port = server_port self.login_user = login_user def run(self): f", "self.server_port = server_port self.login_user = login_user def run(self): f =", "server_port self.login_user = login_user def run(self): f = open(self.config_path, 'r')", "f.close() def update(self, target): if self.forward_type is not None: target[\"type\"]", "def __init__(self, name, forward_type, remote_host, remote_port, local_port, ssh_server, server_port, login_user,", "not None: target[\"server_port\"] = self.server_port if self.login_user is not 
None:", "self.local_port if self.ssh_server is not None: target[\"ssh_server\"] = self.ssh_server if", "self.local_port = local_port self.ssh_server = ssh_server self.server_port = server_port self.login_user", "if self.remote_port is not None: target[\"remote_port\"] = self.remote_port if self.local_port", "= remote_host self.remote_port = remote_port self.local_port = local_port self.ssh_server =", "= login_user def run(self): f = open(self.config_path, 'r') targets =", "self.ssh_server if self.server_port is not None: target[\"server_port\"] = self.server_port if", "self.update(target) else: logger.warn(\"Port forward setting named \" + self.name +", "self.forward_type = forward_type self.remote_host = remote_host self.remote_port = remote_port self.local_port", "f = open(self.config_path, 'r') targets = json.load(f) if self.name in", "local_port, ssh_server, server_port, login_user, config): super(UpdateCommand, self).__init__(config) self.name = name", "not None: target[\"local_port\"] = self.local_port if self.ssh_server is not None:", "write the target f = open(self.config_path, 'w') f.write(json.dumps(targets, indent=4)) f.close()", "self.ssh_server = ssh_server self.server_port = server_port self.login_user = login_user def", "from pfm.pf_command.base import BaseCommand from pfm.util.log import logger class UpdateCommand(BaseCommand):", "self.local_port is not None: target[\"local_port\"] = self.local_port if self.ssh_server is", "= self.local_port if self.ssh_server is not None: target[\"ssh_server\"] = self.ssh_server", "logger.warn(\"Port forward setting named \" + self.name + \"is not", "json from pfm.pf_command.base import BaseCommand from pfm.util.log import logger class", "indent=4)) f.close() def update(self, target): if self.forward_type is not None:", "is not None: target[\"remote_port\"] = self.remote_port if self.local_port is not", "def run(self): f = open(self.config_path, 'r') targets = json.load(f) if", "target): if self.forward_type is not None: target[\"type\"] = self.forward_type if", "class UpdateCommand(BaseCommand): def __init__(self, name, forward_type, remote_host, remote_port, local_port, ssh_server,", "<reponame>takahi-i/pfm import json from pfm.pf_command.base import BaseCommand from pfm.util.log import", "open(self.config_path, 'w') f.write(json.dumps(targets, indent=4)) f.close() def update(self, target): if self.forward_type", "self.ssh_server is not None: target[\"ssh_server\"] = self.ssh_server if self.server_port is", "is not None: target[\"remote_host\"] = self.remote_host if self.remote_port is not", "forward_type self.remote_host = remote_host self.remote_port = remote_port self.local_port = local_port", "f.write(json.dumps(targets, indent=4)) f.close() def update(self, target): if self.forward_type is not", "'r') targets = json.load(f) if self.name in targets: target =", "not None: target[\"type\"] = self.forward_type if self.remote_host is not None:", "not None: target[\"ssh_server\"] = self.ssh_server if self.server_port is not None:", "f = open(self.config_path, 'w') f.write(json.dumps(targets, indent=4)) f.close() def update(self, target):", "if self.server_port is not None: target[\"server_port\"] = self.server_port if self.login_user", "remote_port self.local_port = local_port self.ssh_server = ssh_server self.server_port = server_port", "local_port self.ssh_server = ssh_server self.server_port = server_port self.login_user = login_user", "target[\"server_port\"] = self.server_port if self.login_user is not None: target[\"login_user\"] 
=", "self.remote_port if self.local_port is not None: target[\"local_port\"] = self.local_port if", "= name self.forward_type = forward_type self.remote_host = remote_host self.remote_port =", "server_port, login_user, config): super(UpdateCommand, self).__init__(config) self.name = name self.forward_type =", "target f = open(self.config_path, 'w') f.write(json.dumps(targets, indent=4)) f.close() def update(self,", "named \" + self.name + \"is not registered\") # write", "forward_type, remote_host, remote_port, local_port, ssh_server, server_port, login_user, config): super(UpdateCommand, self).__init__(config)", "target[\"type\"] = self.forward_type if self.remote_host is not None: target[\"remote_host\"] =", "= self.server_port if self.login_user is not None: target[\"login_user\"] = self.login_user", "not registered\") # write the target f = open(self.config_path, 'w')", "import json from pfm.pf_command.base import BaseCommand from pfm.util.log import logger", "self.forward_type is not None: target[\"type\"] = self.forward_type if self.remote_host is", "is not None: target[\"server_port\"] = self.server_port if self.login_user is not", "is not None: target[\"type\"] = self.forward_type if self.remote_host is not", "the target f = open(self.config_path, 'w') f.write(json.dumps(targets, indent=4)) f.close() def", "= targets[self.name] self.update(target) else: logger.warn(\"Port forward setting named \" +", "remote_host self.remote_port = remote_port self.local_port = local_port self.ssh_server = ssh_server", "self.remote_host is not None: target[\"remote_host\"] = self.remote_host if self.remote_port is", "self.server_port is not None: target[\"server_port\"] = self.server_port if self.login_user is", "pfm.pf_command.base import BaseCommand from pfm.util.log import logger class UpdateCommand(BaseCommand): def", "self.name + \"is not registered\") # write the target f", "__init__(self, name, forward_type, remote_host, remote_port, local_port, ssh_server, server_port, login_user, config):", "BaseCommand from pfm.util.log import logger class UpdateCommand(BaseCommand): def __init__(self, name,", "\"is not registered\") # write the target f = open(self.config_path,", "targets: target = targets[self.name] self.update(target) else: logger.warn(\"Port forward setting named", "not None: target[\"remote_port\"] = self.remote_port if self.local_port is not None:", "name, forward_type, remote_host, remote_port, local_port, ssh_server, server_port, login_user, config): super(UpdateCommand,", "is not None: target[\"local_port\"] = self.local_port if self.ssh_server is not", "if self.forward_type is not None: target[\"type\"] = self.forward_type if self.remote_host", "def update(self, target): if self.forward_type is not None: target[\"type\"] =", "import BaseCommand from pfm.util.log import logger class UpdateCommand(BaseCommand): def __init__(self,", "setting named \" + self.name + \"is not registered\") #", "forward setting named \" + self.name + \"is not registered\")", "= open(self.config_path, 'r') targets = json.load(f) if self.name in targets:", "target[\"remote_port\"] = self.remote_port if self.local_port is not None: target[\"local_port\"] =", "\" + self.name + \"is not registered\") # write the", "self.remote_port is not None: target[\"remote_port\"] = self.remote_port if self.local_port is", "login_user, config): super(UpdateCommand, self).__init__(config) self.name = name self.forward_type = forward_type", "None: target[\"local_port\"] = self.local_port if self.ssh_server is not None: 
target[\"ssh_server\"]", "import logger class UpdateCommand(BaseCommand): def __init__(self, name, forward_type, remote_host, remote_port,", "name self.forward_type = forward_type self.remote_host = remote_host self.remote_port = remote_port", "= json.load(f) if self.name in targets: target = targets[self.name] self.update(target)", "= ssh_server self.server_port = server_port self.login_user = login_user def run(self):", "= server_port self.login_user = login_user def run(self): f = open(self.config_path,", "json.load(f) if self.name in targets: target = targets[self.name] self.update(target) else:", "# write the target f = open(self.config_path, 'w') f.write(json.dumps(targets, indent=4))", "'w') f.write(json.dumps(targets, indent=4)) f.close() def update(self, target): if self.forward_type is", "login_user def run(self): f = open(self.config_path, 'r') targets = json.load(f)", "= open(self.config_path, 'w') f.write(json.dumps(targets, indent=4)) f.close() def update(self, target): if", "else: logger.warn(\"Port forward setting named \" + self.name + \"is", "self).__init__(config) self.name = name self.forward_type = forward_type self.remote_host = remote_host", "update(self, target): if self.forward_type is not None: target[\"type\"] = self.forward_type", "None: target[\"remote_host\"] = self.remote_host if self.remote_port is not None: target[\"remote_port\"]", "is not None: target[\"ssh_server\"] = self.ssh_server if self.server_port is not", "registered\") # write the target f = open(self.config_path, 'w') f.write(json.dumps(targets,", "if self.name in targets: target = targets[self.name] self.update(target) else: logger.warn(\"Port", "UpdateCommand(BaseCommand): def __init__(self, name, forward_type, remote_host, remote_port, local_port, ssh_server, server_port,", "= self.remote_port if self.local_port is not None: target[\"local_port\"] = self.local_port", "target[\"local_port\"] = self.local_port if self.ssh_server is not None: target[\"ssh_server\"] =", "None: target[\"ssh_server\"] = self.ssh_server if self.server_port is not None: target[\"server_port\"]", "config): super(UpdateCommand, self).__init__(config) self.name = name self.forward_type = forward_type self.remote_host", "self.name = name self.forward_type = forward_type self.remote_host = remote_host self.remote_port", "= local_port self.ssh_server = ssh_server self.server_port = server_port self.login_user =", "+ self.name + \"is not registered\") # write the target", "remote_port, local_port, ssh_server, server_port, login_user, config): super(UpdateCommand, self).__init__(config) self.name =", "pfm.util.log import logger class UpdateCommand(BaseCommand): def __init__(self, name, forward_type, remote_host,", "= self.ssh_server if self.server_port is not None: target[\"server_port\"] = self.server_port", "ssh_server, server_port, login_user, config): super(UpdateCommand, self).__init__(config) self.name = name self.forward_type" ]
[ "\"testing a fork failure\") atfork.atfork(self._pre, self._parent, self._child) orig_orig_fork = getattr(atfork,", "self.assertEqual([self._child] * 2, atfork._child_call_list) if __debug__: self.assertRaises(AssertionError, atfork.atfork, 1, 2,", "the PSF under a Contributor Agreement. # # Author: <NAME>", "this just in case. os._exit(0) except OSError: self.assertEqual([self._pre, self._parent], self.calls)", "def test_call_atfork_list(self): self.assertEqual([], atfork._call_atfork_list([])) self.assertEqual([], atfork._call_atfork_list([self._pre])) def raise_something(): raise RuntimeError()", "error_msg), error_msg) self.assertTrue( (error_msg.index(\"first parent\") < error_msg.index(\"second parent\")), \"first and", "2.0 (the \"License\"); # you may not use this file", "never happen but do this just in case. os._exit(0) except", "self.assertTrue(callable(atfork._orig_os_forkpty)) # The os module was patched, these should not", "raise RuntimeError(\"This as the first parent error expected.\") def _raise_parent(self):", "the os module. ook. global os importlib.reload(os) sys.stderr = self.orig_stderr", "1, 2, 3) def test_call_atfork_list(self): self.assertEqual([], atfork._call_atfork_list([])) self.assertEqual([], atfork._call_atfork_list([self._pre])) def", "as the second parent error expected.\") def _raise_child(self): self._child() raise", "parent\") < error_msg.index(\"second parent\")), \"first and second errors out of", "self._other], self.calls ) self.assertFalse(atfork._fork_lock.locked()) self._assert_expected_child_stderr(sys.stderr.getvalue()) except BaseException: try: traceback.print_exc() self.orig_stderr.write(sys.stderr.getvalue())", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "[] self.orig_stderr = sys.stderr self.assertFalse( atfork._fork_lock.locked(), \"atfork._fork_lock not released by", "RuntimeError(\"This as the first parent error expected.\") def _raise_parent(self): self._parent()", "error\" in error_msg), error_msg) self.assertTrue( (error_msg.index(\"first parent\") < error_msg.index(\"second parent\")),", "[self._pre, self._other, self._parent, self._other], self.calls ) self.assertFalse(atfork._fork_lock.locked()) self.assertEqual(0, os.waitpid(pid, 0)[1],", "atfork.monkeypatch_os_fork_functions() self.calls = [] self.orig_stderr = sys.stderr self.assertFalse( atfork._fork_lock.locked(), \"atfork._fork_lock", "not be equal. self.assertNotEqual(atfork._orig_os_fork, os.fork) self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty) # These are", "atfork.monkeypatch_os_fork_functions() self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper) if __name__ == \"__main__\": unittest.main()", "_test_a_fork_wrapper(self, fork_func): sys.stderr = StringIO() # restored in tearDown atfork.atfork(self._raise_pre,", "pid = fork_wrapper() if pid == 0: # This should", "self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty) # These are the wrapped versions we patched", "orig_fork_attrname) try: setattr(atfork, orig_fork_attrname, failing_fork) try: pid = fork_wrapper() if", "OSError(0, \"testing a fork failure\") atfork.atfork(self._pre, self._parent, self._child) orig_orig_fork =", "traceback import unittest from xTool import atfork class AtforkTest(unittest.TestCase): def", "def test_monkeypatching(self): if not hasattr(os, \"fork\"): return # Nothing to", "use this file except in compliance with the License. 
#", "module was patched, these should not be equal. self.assertNotEqual(atfork._orig_os_fork, os.fork)", "atfork.\"\"\" import os import sys import importlib from xTool.compat import", "else: self.assertEqual( [self._pre, self._other, self._parent, self._other], self.calls ) self.assertFalse(atfork._fork_lock.locked()) self.assertEqual(0,", "self.calls.append(self._pre) def _parent(self): self.calls.append(self._parent) def _child(self): self.calls.append(self._child) def _other(self): self.calls.append(self._other)", "child\") self._assert_expected_parent_stderr(sys.stderr.getvalue()) def test_os_fork_wrapper(self): self._test_a_fork_wrapper(os.fork) def test_os_forkpty_wrapper(self): self._test_a_fork_wrapper(lambda: os.forkpty()[0]) def", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "error expected.\") def _raise_child(self): self._child() raise RuntimeError(\"This child error is", "License. # You may obtain a copy of the License", "# restored in tearDown atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child) atfork.atfork(self._other, self._other, self._other)", "# # Licensed to the PSF under a Contributor Agreement.", "and second errors out of order in:\\n%r\" % error_msg, )", "self.calls.append(self._parent) def _child(self): self.calls.append(self._child) def _other(self): self.calls.append(self._other) def _raise_pre(self): self._pre()", "arguments as well as None. atfork.atfork(self._pre, self._parent, self._child) atfork.atfork(prepare=self._pre) atfork.atfork(parent=self._parent)", "error\" in error_msg), error_msg) self.assertTrue((\"second parent error\" in error_msg), error_msg)", "expected\" in error_msg) self.assertEqual(1, error_msg.count(\"RuntimeError:\"), error_msg) def test_monkeypatching(self): if not", "def _test_a_fork_wrapper(self, fork_func): sys.stderr = StringIO() # restored in tearDown", "under the License is distributed on an \"AS IS\" BASIS,", "# # Author: <NAME> <<EMAIL>> \"\"\"Tests for atfork.\"\"\" import os", "License for the specific language governing permissions and # limitations", "order in:\\n%r\" % error_msg, ) self.assertEqual(2, error_msg.count(\"RuntimeError:\")) def _assert_expected_child_stderr(self, error_msg):", "fork_func): sys.stderr = StringIO() # restored in tearDown atfork.atfork(self._raise_pre, self._raise_parent,", "errors: self.assertEqual(RuntimeError, exc_info[0]) def _test_a_fork_wrapper(self, fork_func): sys.stderr = StringIO() #", "if pid == 0: # This should never happen but", "try: traceback.print_exc() self.orig_stderr.write(sys.stderr.getvalue()) finally: os._exit(1) finally: os._exit(0) else: self.assertEqual( [self._pre,", "self._parent, self._child) orig_orig_fork = getattr(atfork, orig_fork_attrname) try: setattr(atfork, orig_fork_attrname, failing_fork)", "self._other, self._other) pid = fork_func() if pid == 0: try:", "self.assertTrue(callable(atfork._orig_os_fork)) self.assertTrue(callable(atfork._orig_os_forkpty)) # The os module was patched, these should", "fork_wrapper): def failing_fork(): raise OSError(0, \"testing a fork failure\") atfork.atfork(self._pre,", "child error is expected.\") def _assert_expected_parent_stderr(self, error_msg): self.assertTrue((\"first parent error\"", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "to the PSF under a Contributor Agreement. 
# # Author:", "atfork.os_fork_wrapper) def test_forkpty_wrapper_failure(self): self._test_fork_failure(\"_orig_os_forkpty\", atfork.os_forkpty_wrapper) def test_multiple_monkeypatch_safe(self): self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty,", "atfork.atfork(parent=self._parent) atfork.atfork(child=self._child) self.assertEqual([self._pre] * 2, atfork._prepare_call_list) self.assertEqual([self._parent] * 2, atfork._parent_call_list)", "tearDown atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child) atfork.atfork(self._other, self._other, self._other) pid = fork_func()", "_pre(self): self.calls.append(self._pre) def _parent(self): self.calls.append(self._parent) def _child(self): self.calls.append(self._child) def _other(self):", "StringIO() # restored in tearDown atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child) atfork.atfork(self._other, self._other,", "# These are the wrapped versions we patched in. self.assertEqual(atfork.os_fork_wrapper,", "import StringIO import traceback import unittest from xTool import atfork", "xTool import atfork class AtforkTest(unittest.TestCase): def setUp(self): atfork.monkeypatch_os_fork_functions() self.calls =", "\"atfork._fork_lock not released by an earlier test!\", ) # Unregister", "def test_multiple_monkeypatch_safe(self): self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper) atfork.monkeypatch_os_fork_functions() self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty,", "released by an earlier test!\", ) # Unregister calls registered", "= self.orig_stderr def _pre(self): self.calls.append(self._pre) def _parent(self): self.calls.append(self._parent) def _child(self):", "the second parent error expected.\") def _raise_child(self): self._child() raise RuntimeError(\"This", "setUp(self): atfork.monkeypatch_os_fork_functions() self.calls = [] self.orig_stderr = sys.stderr self.assertFalse( atfork._fork_lock.locked(),", "atfork._call_atfork_list([self._pre])) def raise_something(): raise RuntimeError() errors = atfork._call_atfork_list([raise_something] * 2)", "a fork failure\") atfork.atfork(self._pre, self._parent, self._child) orig_orig_fork = getattr(atfork, orig_fork_attrname)", "import unittest from xTool import atfork class AtforkTest(unittest.TestCase): def setUp(self):", "os.forkpty) # These are the wrapped versions we patched in.", ") # Unregister calls registered by earlier tests. atfork._prepare_call_list =", "Unregister calls registered by earlier tests. atfork._prepare_call_list = [] atfork._parent_call_list", "restored in tearDown atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child) atfork.atfork(self._other, self._other, self._other) pid", "None. atfork.atfork(self._pre, self._parent, self._child) atfork.atfork(prepare=self._pre) atfork.atfork(parent=self._parent) atfork.atfork(child=self._child) self.assertEqual([self._pre] * 2,", "BaseException: try: traceback.print_exc() self.orig_stderr.write(sys.stderr.getvalue()) finally: os._exit(1) finally: os._exit(0) else: self.assertEqual(", "def tearDown(self): # Un-monkeypatch the os module. ook. 
global os", "= fork_func() if pid == 0: try: try: self.assertEqual( [self._pre,", "_raise_pre(self): self._pre() raise RuntimeError(\"This as the first parent error expected.\")", "\"\"\"Tests for atfork.\"\"\" import os import sys import importlib from", "Nothing to test on this platform. self.assertTrue(callable(atfork._orig_os_fork)) self.assertTrue(callable(atfork._orig_os_forkpty)) # The", "2, 3) def test_call_atfork_list(self): self.assertEqual([], atfork._call_atfork_list([])) self.assertEqual([], atfork._call_atfork_list([self._pre])) def raise_something():", "in errors: self.assertEqual(RuntimeError, exc_info[0]) def _test_a_fork_wrapper(self, fork_func): sys.stderr = StringIO()", "test!\", ) # Unregister calls registered by earlier tests. atfork._prepare_call_list", "errors = atfork._call_atfork_list([raise_something] * 2) self.assertEqual(2, len(errors)) for exc_info in", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "importlib from xTool.compat import StringIO import traceback import unittest from", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "not released by an earlier test!\", ) # Unregister calls", "to in writing, software # distributed under the License is", "# See the License for the specific language governing permissions", "fail!\") finally: setattr(atfork, orig_fork_attrname, orig_orig_fork) def test_fork_wrapper_failure(self): self._test_fork_failure(\"_orig_os_fork\", atfork.os_fork_wrapper) def", "orig_fork_attrname, fork_wrapper): def failing_fork(): raise OSError(0, \"testing a fork failure\")", "self.assertFalse( atfork._fork_lock.locked(), \"atfork._fork_lock not released by an earlier test!\", )", "in child\") self._assert_expected_parent_stderr(sys.stderr.getvalue()) def test_os_fork_wrapper(self): self._test_a_fork_wrapper(os.fork) def test_os_forkpty_wrapper(self): self._test_a_fork_wrapper(lambda: os.forkpty()[0])", "language governing permissions and # limitations under the License. #", "case. os._exit(0) except OSError: self.assertEqual([self._pre, self._parent], self.calls) else: self.fail(\"Fork failed", "or agreed to in writing, software # distributed under the", "self.assertEqual(RuntimeError, exc_info[0]) def _test_a_fork_wrapper(self, fork_func): sys.stderr = StringIO() # restored", "in tearDown atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child) atfork.atfork(self._other, self._other, self._other) pid =", "required by applicable law or agreed to in writing, software", "is expected.\") def _assert_expected_parent_stderr(self, error_msg): self.assertTrue((\"first parent error\" in error_msg),", "os import sys import importlib from xTool.compat import StringIO import", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "and keyword arguments as well as None. atfork.atfork(self._pre, self._parent, self._child)", "[] atfork._parent_call_list = [] atfork._child_call_list = [] def tearDown(self): #", "with the License. 
# You may obtain a copy of", "parent error\" in error_msg), error_msg) self.assertTrue( (error_msg.index(\"first parent\") < error_msg.index(\"second", "= atfork._call_atfork_list([raise_something] * 2) self.assertEqual(2, len(errors)) for exc_info in errors:", "second errors out of order in:\\n%r\" % error_msg, ) self.assertEqual(2,", "failed to fail!\") finally: setattr(atfork, orig_fork_attrname, orig_orig_fork) def test_fork_wrapper_failure(self): self._test_fork_failure(\"_orig_os_fork\",", "= [] def tearDown(self): # Un-monkeypatch the os module. ook.", "self.assertTrue((\"second parent error\" in error_msg), error_msg) self.assertTrue( (error_msg.index(\"first parent\") <", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "= sys.stderr self.assertFalse( atfork._fork_lock.locked(), \"atfork._fork_lock not released by an earlier", "def _raise_parent(self): self._parent() raise RuntimeError(\"This as the second parent error", "os.fork) self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty) def test_register_atfork_calls(self): # Test with both positional", "wrapped versions we patched in. self.assertEqual(atfork.os_fork_wrapper, os.fork) self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty) def", "distributed under the License is distributed on an \"AS IS\"", "as the first parent error expected.\") def _raise_parent(self): self._parent() raise", "2, atfork._child_call_list) if __debug__: self.assertRaises(AssertionError, atfork.atfork, 1, 2, 3) def", "failing_fork(): raise OSError(0, \"testing a fork failure\") atfork.atfork(self._pre, self._parent, self._child)", "raise OSError(0, \"testing a fork failure\") atfork.atfork(self._pre, self._parent, self._child) orig_orig_fork", "keyword arguments as well as None. atfork.atfork(self._pre, self._parent, self._child) atfork.atfork(prepare=self._pre)", "def _other(self): self.calls.append(self._other) def _raise_pre(self): self._pre() raise RuntimeError(\"This as the", "the first parent error expected.\") def _raise_parent(self): self._parent() raise RuntimeError(\"This", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "self.assertEqual(atfork.os_fork_wrapper, os.fork) self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty) def test_register_atfork_calls(self): # Test with both", "os.forkpty()[0]) def _test_fork_failure(self, orig_fork_attrname, fork_wrapper): def failing_fork(): raise OSError(0, \"testing", "the License. 
# # Licensed to the PSF under a", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "try: self.assertEqual( [self._pre, self._other, self._child, self._other], self.calls ) self.assertFalse(atfork._fork_lock.locked()) self._assert_expected_child_stderr(sys.stderr.getvalue())", "not use this file except in compliance with the License.", "The os module was patched, these should not be equal.", "self._pre() raise RuntimeError(\"This as the first parent error expected.\") def", "error is expected\" in error_msg) self.assertEqual(1, error_msg.count(\"RuntimeError:\"), error_msg) def test_monkeypatching(self):", "writing, software # distributed under the License is distributed on", "_raise_parent(self): self._parent() raise RuntimeError(\"This as the second parent error expected.\")", "atfork.atfork(prepare=self._pre) atfork.atfork(parent=self._parent) atfork.atfork(child=self._child) self.assertEqual([self._pre] * 2, atfork._prepare_call_list) self.assertEqual([self._parent] * 2,", "you may not use this file except in compliance with", ") self.assertFalse(atfork._fork_lock.locked()) self._assert_expected_child_stderr(sys.stderr.getvalue()) except BaseException: try: traceback.print_exc() self.orig_stderr.write(sys.stderr.getvalue()) finally: os._exit(1)", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "test_monkeypatching(self): if not hasattr(os, \"fork\"): return # Nothing to test", "# Test with both positional and keyword arguments as well", "[] def tearDown(self): # Un-monkeypatch the os module. ook. global", "finally: os._exit(1) finally: os._exit(0) else: self.assertEqual( [self._pre, self._other, self._parent, self._other],", "os._exit(1) finally: os._exit(0) else: self.assertEqual( [self._pre, self._other, self._parent, self._other], self.calls", "a Contributor Agreement. # # Author: <NAME> <<EMAIL>> \"\"\"Tests for", "orig_orig_fork) def test_fork_wrapper_failure(self): self._test_fork_failure(\"_orig_os_fork\", atfork.os_fork_wrapper) def test_forkpty_wrapper_failure(self): self._test_fork_failure(\"_orig_os_forkpty\", atfork.os_forkpty_wrapper) def", "error_msg) def test_monkeypatching(self): if not hasattr(os, \"fork\"): return # Nothing", "these should not be equal. self.assertNotEqual(atfork._orig_os_fork, os.fork) self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty) #", "2, atfork._prepare_call_list) self.assertEqual([self._parent] * 2, atfork._parent_call_list) self.assertEqual([self._child] * 2, atfork._child_call_list)", "CONDITIONS OF ANY KIND, either express or implied. # See", "import sys import importlib from xTool.compat import StringIO import traceback", "# Un-monkeypatch the os module. ook. 
global os importlib.reload(os) sys.stderr", "* 2, atfork._parent_call_list) self.assertEqual([self._child] * 2, atfork._child_call_list) if __debug__: self.assertRaises(AssertionError,", "self.assertEqual([], atfork._call_atfork_list([self._pre])) def raise_something(): raise RuntimeError() errors = atfork._call_atfork_list([raise_something] *", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "error_msg), error_msg) self.assertTrue((\"second parent error\" in error_msg), error_msg) self.assertTrue( (error_msg.index(\"first", "import os import sys import importlib from xTool.compat import StringIO", "< error_msg.index(\"second parent\")), \"first and second errors out of order", "error_msg.index(\"second parent\")), \"first and second errors out of order in:\\n%r\"", "self._test_fork_failure(\"_orig_os_fork\", atfork.os_fork_wrapper) def test_forkpty_wrapper_failure(self): self._test_fork_failure(\"_orig_os_forkpty\", atfork.os_forkpty_wrapper) def test_multiple_monkeypatch_safe(self): self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)", "def test_forkpty_wrapper_failure(self): self._test_fork_failure(\"_orig_os_forkpty\", atfork.os_forkpty_wrapper) def test_multiple_monkeypatch_safe(self): self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)", "<<EMAIL>> \"\"\"Tests for atfork.\"\"\" import os import sys import importlib", "RuntimeError(\"This child error is expected.\") def _assert_expected_parent_stderr(self, error_msg): self.assertTrue((\"first parent", "ook. global os importlib.reload(os) sys.stderr = self.orig_stderr def _pre(self): self.calls.append(self._pre)", "= getattr(atfork, orig_fork_attrname) try: setattr(atfork, orig_fork_attrname, failing_fork) try: pid =", "patched, these should not be equal. self.assertNotEqual(atfork._orig_os_fork, os.fork) self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty)", "earlier test!\", ) # Unregister calls registered by earlier tests.", "def _raise_child(self): self._child() raise RuntimeError(\"This child error is expected.\") def", "failing_fork) try: pid = fork_wrapper() if pid == 0: #", "def setUp(self): atfork.monkeypatch_os_fork_functions() self.calls = [] self.orig_stderr = sys.stderr self.assertFalse(", "# # Copyright 2009 Google Inc. # # Licensed under", "\"fork\"): return # Nothing to test on this platform. self.assertTrue(callable(atfork._orig_os_fork))", "test_fork_wrapper_failure(self): self._test_fork_failure(\"_orig_os_fork\", atfork.os_fork_wrapper) def test_forkpty_wrapper_failure(self): self._test_fork_failure(\"_orig_os_forkpty\", atfork.os_forkpty_wrapper) def test_multiple_monkeypatch_safe(self): self.assertNotEqual(atfork._orig_os_fork,", "Copyright 2009 Google Inc. # # Licensed under the Apache", "self._other) pid = fork_func() if pid == 0: try: try:", "# Author: <NAME> <<EMAIL>> \"\"\"Tests for atfork.\"\"\" import os import", "atfork.monkeypatch_os_fork_functions() self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper) atfork.monkeypatch_os_fork_functions() self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)", "on this platform. 
self.assertTrue(callable(atfork._orig_os_fork)) self.assertTrue(callable(atfork._orig_os_forkpty)) # The os module was", "orig_fork_attrname, failing_fork) try: pid = fork_wrapper() if pid == 0:", "in error_msg), error_msg) self.assertTrue( (error_msg.index(\"first parent\") < error_msg.index(\"second parent\")), \"first", "self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty) def test_register_atfork_calls(self): # Test with both positional and", "OR CONDITIONS OF ANY KIND, either express or implied. #", "= [] atfork._child_call_list = [] def tearDown(self): # Un-monkeypatch the", "in. self.assertEqual(atfork.os_fork_wrapper, os.fork) self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty) def test_register_atfork_calls(self): # Test with", "the License is distributed on an \"AS IS\" BASIS, #", "setattr(atfork, orig_fork_attrname, orig_orig_fork) def test_fork_wrapper_failure(self): self._test_fork_failure(\"_orig_os_fork\", atfork.os_fork_wrapper) def test_forkpty_wrapper_failure(self): self._test_fork_failure(\"_orig_os_forkpty\",", "3) def test_call_atfork_list(self): self.assertEqual([], atfork._call_atfork_list([])) self.assertEqual([], atfork._call_atfork_list([self._pre])) def raise_something(): raise", "try: pid = fork_wrapper() if pid == 0: # This", "exc_info[0]) def _test_a_fork_wrapper(self, fork_func): sys.stderr = StringIO() # restored in", "be equal. self.assertNotEqual(atfork._orig_os_fork, os.fork) self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty) # These are the", "[self._pre, self._other, self._child, self._other], self.calls ) self.assertFalse(atfork._fork_lock.locked()) self._assert_expected_child_stderr(sys.stderr.getvalue()) except BaseException:", "parent\")), \"first and second errors out of order in:\\n%r\" %", "atfork.atfork(self._pre, self._parent, self._child) atfork.atfork(prepare=self._pre) atfork.atfork(parent=self._parent) atfork.atfork(child=self._child) self.assertEqual([self._pre] * 2, atfork._prepare_call_list)", "import atfork class AtforkTest(unittest.TestCase): def setUp(self): atfork.monkeypatch_os_fork_functions() self.calls = []", "self.fail(\"Fork failed to fail!\") finally: setattr(atfork, orig_fork_attrname, orig_orig_fork) def test_fork_wrapper_failure(self):", "atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper) atfork.monkeypatch_os_fork_functions() self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper) atfork.monkeypatch_os_fork_functions() self.assertNotEqual(atfork._orig_os_fork,", "expected.\") def _assert_expected_parent_stderr(self, error_msg): self.assertTrue((\"first parent error\" in error_msg), error_msg)", "law or agreed to in writing, software # distributed under", "= [] self.orig_stderr = sys.stderr self.assertFalse( atfork._fork_lock.locked(), \"atfork._fork_lock not released", "in error_msg), error_msg) self.assertTrue((\"second parent error\" in error_msg), error_msg) self.assertTrue(", "are the wrapped versions we patched in. self.assertEqual(atfork.os_fork_wrapper, os.fork) self.assertEqual(atfork.os_forkpty_wrapper,", "error is expected.\") def _assert_expected_parent_stderr(self, error_msg): self.assertTrue((\"first parent error\" in", "except OSError: self.assertEqual([self._pre, self._parent], self.calls) else: self.fail(\"Fork failed to fail!\")", "import importlib from xTool.compat import StringIO import traceback import unittest", "versions we patched in. 
<reponame>luciferliu/xTools
#!/usr/bin/python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed to the PSF under a Contributor Agreement.
#
# Author: <NAME> <<EMAIL>>

"""Tests for atfork."""

import os
import sys
import importlib
from xTool.compat import StringIO
import traceback
import unittest

from xTool import atfork


class AtforkTest(unittest.TestCase):
    def setUp(self):
        atfork.monkeypatch_os_fork_functions()
        self.calls = []
        self.orig_stderr = sys.stderr
        self.assertFalse(
            atfork._fork_lock.locked(),
            "atfork._fork_lock not released by an earlier test!",
        )
        # Unregister calls registered by earlier tests.
        atfork._prepare_call_list = []
        atfork._parent_call_list = []
        atfork._child_call_list = []

    def tearDown(self):
        # Un-monkeypatch the os module.  ook.
        global os
        importlib.reload(os)
        sys.stderr = self.orig_stderr

    def _pre(self):
        self.calls.append(self._pre)

    def _parent(self):
        self.calls.append(self._parent)

    def _child(self):
        self.calls.append(self._child)

    def _other(self):
        self.calls.append(self._other)

    def _raise_pre(self):
        self._pre()
        raise RuntimeError("This as the first parent error expected.")

    def _raise_parent(self):
        self._parent()
        raise RuntimeError("This as the second parent error expected.")

    def _raise_child(self):
        self._child()
        raise RuntimeError("This child error is expected.")

    def _assert_expected_parent_stderr(self, error_msg):
        self.assertTrue(("first parent error" in error_msg), error_msg)
        self.assertTrue(("second parent error" in error_msg), error_msg)
        self.assertTrue(
            (error_msg.index("first parent") < error_msg.index("second parent")),
            "first and second errors out of order in:\n%r" % error_msg,
        )
        self.assertEqual(2, error_msg.count("RuntimeError:"))

    def _assert_expected_child_stderr(self, error_msg):
        self.assertTrue("child error is expected" in error_msg)
        self.assertEqual(1, error_msg.count("RuntimeError:"), error_msg)

    def test_monkeypatching(self):
        if not hasattr(os, "fork"):
            return  # Nothing to test on this platform.
        self.assertTrue(callable(atfork._orig_os_fork))
        self.assertTrue(callable(atfork._orig_os_forkpty))
        # The os module was patched, these should not be equal.
        self.assertNotEqual(atfork._orig_os_fork, os.fork)
        self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty)
        # These are the wrapped versions we patched in.
        self.assertEqual(atfork.os_fork_wrapper, os.fork)
        self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty)

    def test_register_atfork_calls(self):
        # Test with both positional and keyword arguments as well as None.
        atfork.atfork(self._pre, self._parent, self._child)
        atfork.atfork(prepare=self._pre)
        atfork.atfork(parent=self._parent)
        atfork.atfork(child=self._child)
        self.assertEqual([self._pre] * 2, atfork._prepare_call_list)
        self.assertEqual([self._parent] * 2, atfork._parent_call_list)
        self.assertEqual([self._child] * 2, atfork._child_call_list)
        if __debug__:
            self.assertRaises(AssertionError, atfork.atfork, 1, 2, 3)

    def test_call_atfork_list(self):
        self.assertEqual([], atfork._call_atfork_list([]))
        self.assertEqual([], atfork._call_atfork_list([self._pre]))

        def raise_something():
            raise RuntimeError()

        errors = atfork._call_atfork_list([raise_something] * 2)
        self.assertEqual(2, len(errors))
        for exc_info in errors:
            self.assertEqual(RuntimeError, exc_info[0])

    def _test_a_fork_wrapper(self, fork_func):
        sys.stderr = StringIO()  # restored in tearDown
        atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child)
        atfork.atfork(self._other, self._other, self._other)
        pid = fork_func()
        if pid == 0:
            try:
                try:
                    self.assertEqual(
                        [self._pre, self._other, self._child, self._other],
                        self.calls
                    )
                    self.assertFalse(atfork._fork_lock.locked())
                    self._assert_expected_child_stderr(sys.stderr.getvalue())
                except BaseException:
                    try:
                        traceback.print_exc()
                        self.orig_stderr.write(sys.stderr.getvalue())
                    finally:
                        os._exit(1)
            finally:
                os._exit(0)
        else:
            self.assertEqual(
                [self._pre, self._other, self._parent, self._other], self.calls
            )
            self.assertFalse(atfork._fork_lock.locked())
            self.assertEqual(0, os.waitpid(pid, 0)[1], "error in child")
            self._assert_expected_parent_stderr(sys.stderr.getvalue())

    def test_os_fork_wrapper(self):
        self._test_a_fork_wrapper(os.fork)

    def test_os_forkpty_wrapper(self):
        self._test_a_fork_wrapper(lambda: os.forkpty()[0])

    def _test_fork_failure(self, orig_fork_attrname, fork_wrapper):
        def failing_fork():
            raise OSError(0, "testing a fork failure")

        atfork.atfork(self._pre, self._parent, self._child)
        orig_orig_fork = getattr(atfork, orig_fork_attrname)
        try:
            setattr(atfork, orig_fork_attrname, failing_fork)
            try:
                pid = fork_wrapper()
                if pid == 0:
                    # This should never happen but do this just in case.
                    os._exit(0)
            except OSError:
                self.assertEqual([self._pre, self._parent], self.calls)
            else:
                self.fail("Fork failed to fail!")
        finally:
            setattr(atfork, orig_fork_attrname, orig_orig_fork)

    def test_fork_wrapper_failure(self):
        self._test_fork_failure("_orig_os_fork", atfork.os_fork_wrapper)

    def test_forkpty_wrapper_failure(self):
        self._test_fork_failure("_orig_os_forkpty", atfork.os_forkpty_wrapper)

    def test_multiple_monkeypatch_safe(self):
        self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
        self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
        atfork.monkeypatch_os_fork_functions()
        self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
        self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)
        atfork.monkeypatch_os_fork_functions()
        self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper)
        self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper)


if __name__ == "__main__":
    unittest.main()
import os
from pathlib import Path

__all__ = ['list_files_recur', 'scan_and_create_dir_tree', 'get_all_data_files', 'get_subsubdirs']


def list_files_recur(path):
    """
    Cheater function that wraps path.rglob().

    :param Path path: path to list recursively
    :return list: list of Path objects
    """
    files = []
    for file in path.rglob('*'):
        files.append(file)

    return files


def scan_and_create_dir_tree(path, file=True):
    """
    Creates all the necessary directories for the file at the end of path to be created.

    When specified with a filepath to a file or folder, it creates directories until the path is valid.

    :param Path path: must end with a filename, else the final directory won't be created
    :param bool file: Boolean, does the given path end with a file? If not, path.parts[-1] will be created
    :return None:
    """
    parts = path.parts
    path_to_check = Path(parts[0])

    for i in range(1, len(parts)):
        if not path_to_check.exists():
            path_to_check.mkdir()
        path_to_check = path_to_check / parts[i]

    if file:
        pass
    else:
        if not path_to_check.exists():
            path_to_check.mkdir()


def get_all_data_files(path, filetype):
    """
    Recursively search the given directory for .xxx files.

    :param Path path: Path to search
    :param str filetype: str, ".type" of file to search for
    :return list: list of file-like Path objects
    """
    files = list_files_recur(path)
    files[:] = [file for file in files if filetype in file.name]

    return files


def get_subsubdirs(path):
    """
    Get the second-level subdirectories of the given path.

    If given path 'a/b', a sample return would be ['a/b/c/d', 'a/b/c/d2', 'a/b/c/etc']

    :param str path:
    :return list: list containing Path instances for all paths found two levels below the supplied path
    """
    leveltwo_subdirs = []
    immediate_subdirs = [os.scandir(subdir) for subdir in os.scandir(path) if Path(subdir).is_dir()]

    for scan in immediate_subdirs:
        for subdir in scan:
            if Path(subdir).is_dir():
                leveltwo_subdirs.append(Path(subdir))

    return leveltwo_subdirs
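# ---------------------------------------------------------------------------
# Usage sketch for the path helpers above. The import path `file_utils` and
# every directory/file name here are assumptions made for illustration only;
# the function signatures are the ones defined above.
# ---------------------------------------------------------------------------
from pathlib import Path

from file_utils import get_all_data_files, get_subsubdirs, scan_and_create_dir_tree

# Create data/2024/raw/ so that readings.txt can later be written there.
scan_and_create_dir_tree(Path('data/2024/raw/readings.txt'), file=True)

# Collect every .txt file found anywhere under data/.
print(get_all_data_files(Path('data'), '.txt'))

# List the second-level subdirectories beneath data/.
print(get_subsubdirs('data'))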
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np

from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes
from openvino.tools.mo.ops.op import Op


class ExperimentalDetectronDetectionOutput(Op):
    op = 'ExperimentalDetectronDetectionOutput'
    enabled = True

    def __init__(self, graph, attrs):
        mandatory_props = dict(
            type=self.op,
            op=self.op,
            version='opset6',
            infer=self.infer,
            reverse_infer=self.reverse_infer,
            type_infer=self.type_infer,
            in_ports_count=4,
            out_ports_count=3,
        )
        super().__init__(graph, mandatory_props, attrs)

    def backend_attrs(self):
        return [
            ('class_agnostic_box_regression', lambda node: str(bool(node['class_agnostic_box_regression'])).lower()),
            'max_detections_per_image',
            'nms_threshold',
            'num_classes',
            'post_nms_count',
            'score_threshold',
            'max_delta_log_wh',
            ('deltas_weights', lambda node: ','.join(map(str, node['deltas_weights'])))]

    @staticmethod
    def infer(node):
        rois_num = node.max_detections_per_image
        # boxes
        node.out_port(0).data.set_shape([rois_num, 4])
        # classes, scores, batch indices
        # We use range(1, 1 + max(node.out_ports().keys())) instead of range(1, 3), because there are incorrectly
        # generated models where ExperimentalDetectronDetectionOutput has 4 outputs.
        for port_ind in range(1, 1 + max(node.out_ports().keys())):
            if not node.out_port(port_ind).disconnected():
                node.out_port(port_ind).data.set_shape([rois_num])

    @staticmethod
    def type_infer(node):
        in_data_type = node.in_port(0).get_data_type()
        node.out_port(0).set_data_type(in_data_type)
        node.out_port(1).set_data_type(np.int32)  # the second output contains class indices
        node.out_port(2).set_data_type(in_data_type)
        if node.is_out_port_connected(3):
            node.out_port(3).set_data_type(np.int32)  # the fourth output contains batch indices

    @staticmethod
    def reverse_infer(node):
        set_input_shapes(node,
                         shape_array([dynamic_dimension_value, 4]),
                         shape_array([dynamic_dimension_value, node['num_classes'] * 4]),
                         shape_array([dynamic_dimension_value, node['num_classes']]),
                         shape_array([1, 3]))
'post_nms_count', 'score_threshold', 'max_delta_log_wh', ('deltas_weights', lambda node: ','.join(map(str,", "= node.max_detections_per_image # boxes node.out_port(0).data.set_shape([rois_num, 4]) # classes, scores, batch", "super().__init__(graph, mandatory_props, attrs) def backend_attrs(self): return [ ('class_agnostic_box_regression', lambda node:", "shape_array([dynamic_dimension_value, 4]), shape_array([dynamic_dimension_value, node['num_classes'] * 4]), shape_array([dynamic_dimension_value, node['num_classes']]), shape_array([1, 3]))", "4 outputs. for port_ind in range(1, 1 + max(node.out_ports().keys())): if", "generated models where ExperimentalDetectronDetectionOutput has 4 outputs. for port_ind in", "# the fourth output contains batch indices @staticmethod def reverse_infer(node):", "','.join(map(str, node['deltas_weights'])))] @staticmethod def infer(node): rois_num = node.max_detections_per_image # boxes", "instead of range(1, 3), because there are incorrectly # generated", "# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import", "because there are incorrectly # generated models where ExperimentalDetectronDetectionOutput has", "Op class ExperimentalDetectronDetectionOutput(Op): op = 'ExperimentalDetectronDetectionOutput' enabled = True def", "graph, attrs): mandatory_props = dict( type=self.op, op=self.op, version='opset6', infer=self.infer, reverse_infer=self.reverse_infer,", "2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np", "second output contains class indices node.out_port(2).set_data_type(in_data_type) if node.is_out_port_connected(3): node.out_port(3).set_data_type(np.int32) #", "1 + max(node.out_ports().keys())) instead of range(1, 3), because there are", "@staticmethod def reverse_infer(node): set_input_shapes(node, shape_array([dynamic_dimension_value, 4]), shape_array([dynamic_dimension_value, node['num_classes'] * 4]),", "def infer(node): rois_num = node.max_detections_per_image # boxes node.out_port(0).data.set_shape([rois_num, 4]) #", "import Op class ExperimentalDetectronDetectionOutput(Op): op = 'ExperimentalDetectronDetectionOutput' enabled = True", "# We use range(1, 1 + max(node.out_ports().keys())) instead of range(1,", "= dict( type=self.op, op=self.op, version='opset6', infer=self.infer, reverse_infer=self.reverse_infer, type_infer=self.type_infer, in_ports_count=4, out_ports_count=3,", "= True def __init__(self, graph, attrs): mandatory_props = dict( type=self.op,", "('class_agnostic_box_regression', lambda node: str(bool(node['class_agnostic_box_regression'])).lower()), 'max_detections_per_image', 'nms_threshold', 'num_classes', 'post_nms_count', 'score_threshold', 'max_delta_log_wh',", "ExperimentalDetectronDetectionOutput has 4 outputs. for port_ind in range(1, 1 +", "version='opset6', infer=self.infer, reverse_infer=self.reverse_infer, type_infer=self.type_infer, in_ports_count=4, out_ports_count=3, ) super().__init__(graph, mandatory_props, attrs)", "there are incorrectly # generated models where ExperimentalDetectronDetectionOutput has 4", "the fourth output contains batch indices @staticmethod def reverse_infer(node): set_input_shapes(node,", "from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes from openvino.tools.mo.ops.op import Op" ]
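# --- Illustrative sketch, not part of the OpenVINO source above ---
# A plain-Python stand-in for the shape/type contract that infer() and type_infer() implement,
# so the output layout is easy to see without Model Optimizer Graph/Node objects. The function
# names and the example values below are assumptions for illustration only.
import numpy as np

def sketch_output_shapes(max_detections_per_image, num_connected_out_ports=3):
    """Boxes are [N, 4]; every other connected output (classes, scores, batch indices) is [N]."""
    rois_num = max_detections_per_image
    shapes = {0: [rois_num, 4]}
    for port_ind in range(1, num_connected_out_ports):
        shapes[port_ind] = [rois_num]
    return shapes

def sketch_output_dtypes(in_dtype=np.float32, fourth_output_connected=False):
    """Outputs 0 and 2 keep the input dtype; outputs 1 and (optionally) 3 are int32 indices."""
    dtypes = {0: in_dtype, 1: np.int32, 2: in_dtype}
    if fourth_output_connected:
        dtypes[3] = np.int32
    return dtypes

if __name__ == '__main__':
    print(sketch_output_shapes(100))        # boxes [100, 4]; classes and scores [100]
    print(sketch_output_dtypes(np.float32, fourth_output_connected=True))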
#!/usr/bin/env python

import numpy as np, os, sys
from get_sepsis_score import load_sepsis_model, get_sepsis_score

def load_challenge_data(file):
    with open(file, 'r') as f:
        header = f.readline().strip()
        column_names = header.split('|')
        data = np.loadtxt(f, delimiter='|')

    # Ignore SepsisLabel column if present.
    if column_names[-1] == 'SepsisLabel':
        column_names = column_names[:-1]
        data = data[:, :-1]

    return data

def save_challenge_predictions(file, scores, labels):
    with open(file, 'w') as f:
        f.write('PredictedProbability|PredictedLabel\n')
        for (s, l) in zip(scores, labels):
            f.write('%g|%d\n' % (s, l))

if __name__ == '__main__':
    # Parse arguments.
    if len(sys.argv) != 3:
        raise Exception('Include the input and output directories as arguments, e.g., python driver.py input output.')

    input_directory = sys.argv[1]
    output_directory = sys.argv[2]

    # Find files.
    files = []
    for f in os.listdir(input_directory):
        if os.path.isfile(os.path.join(input_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('psv'):
            files.append(f)

    if not os.path.isdir(output_directory):
        os.mkdir(output_directory)

    # Load model.
    model = load_sepsis_model()
    print(model)

    # Iterate over files.
    for f in files:
        # Load data.
        input_file = os.path.join(input_directory, f)
        data = load_challenge_data(input_file)
        # print(type(data))

        # Make predictions.
        num_rows = len(data)
        scores = np.zeros(num_rows)
        labels = np.zeros(num_rows)
        for t in range(num_rows):
            current_data = data[:t+1]
            current_score, current_label = get_sepsis_score(current_data, model)
            scores[t] = current_score
            labels[t] = current_label

        # Save results.
        output_file = os.path.join(output_directory, f)
        save_challenge_predictions(output_file, scores, labels)
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-

from collections import defaultdict
from threading import Thread
from time import perf_counter
import time

from LspLibrary import bcolors
import matplotlib.pyplot as plt


class LspRuntimeMonitor:
    """Collects timing, log output and per-generation statistics for an LSP run."""

    clockStart = None
    clockEnd = None
    mutation_strategy = "simple_mutation"
    popsData = defaultdict(lambda: None)
    outputString = ""
    outputFilePath = "data/output/output.txt"
    verbose = False
    running = True

    def __init__(self) -> None:
        """All state is kept on the class, so instances need no setup."""
        pass

    @classmethod
    def duration(cls):
        """Return the elapsed wall-clock time between started() and ended()."""
        return f"{cls.clockEnd - cls.clockStart} second(s)"

    @classmethod
    def started(cls):
        """Mark the beginning of a run and start the clock."""
        cls.running = True
        LspRuntimeMonitor.clockStart = perf_counter()
        print(f"{bcolors.OKGREEN}Processing input data.{bcolors.ENDC}")
        # Thread(cls.waitingAnimation())

    @classmethod
    def ended(cls):
        """Mark the end of a run and stop the clock."""
        cls.running = False
        LspRuntimeMonitor.clockEnd = perf_counter()

    @classmethod
    def output(cls, output):
        """Accumulate a piece of output, echoing it when verbose."""
        cls.outputString += output
        if cls.verbose:
            print(output)

    @classmethod
    def saveOutput(cls):
        """Write the accumulated output to the default output file."""
        f = open(cls.outputFilePath, "w")
        f.write(cls.outputString)
        f.close()

    @classmethod
    def report(cls):
        """Report the run duration, save all output and plot the collected statistics."""
        # Duration
        durationStatement = cls.duration()
        cls.output(durationStatement)

        # Saving all generated output to a default file
        cls.saveOutput()

        cls.plotData()

    @classmethod
    def plotData(cls):
        """Plot the evolution of the population costs over generations."""
        print('-----------------------------------------')
        print(cls.popsData)

        data = list(cls.popsData.values())[0]

        # Plots
        # Plotting the evolution of the maximal cost over generations
        plt.plot(list(range(len(data["max"]))), data["max"])
        plt.ylabel("Population maximal cost")
        plt.show()

        # Plotting the evolution of the minimal cost over generations
        plt.plot(list(range(len(data["min"]))), data["min"])
        plt.ylabel("Population minimal cost")
        plt.show()

    @classmethod
    def waitingAnimation(cls):
        """Spin a simple console animation while the run is in progress."""
        animation = "|/-\\"
        idx = 0
        # while thing_not_complete():
        while cls.running:
            print(animation[idx % len(animation)], end="\r")
            idx += 1
            time.sleep(0.1)
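# --- Hypothetical sketch, not the project's actual LspLibrary module ---
# LspRuntimeMonitor imports `bcolors` from LspLibrary, which is not included in this document.
# A conventional ANSI escape-code holder that would satisfy that import is sketched below; only
# OKGREEN and ENDC are confirmed by the monitor's code, the other attributes follow the common
# pattern and are assumptions.
class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'

if __name__ == '__main__':
    # Minimal usage mirroring the monitor's started() message.
    print(f"{bcolors.OKGREEN}Processing input data.{bcolors.ENDC}")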
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
#app
import methylcheck # uses .load; get_sex uses methylprep models too

LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def _get_copy_number(meth,unmeth):
    """function to return copy number.
    requires dataframes of methylated and
    unmethylated values. can be raw OR corrected"""
    # minfi R version:
    # log2(getMeth(object) + getUnmeth(object))
    return np.log2(meth+unmeth)


def get_sex(data_source, array_type=None, verbose=False, plot=False, save=False, on_lambda=False,
        median_cutoff= -2, include_probe_failure_percent=True, poobah_cutoff=20, custom_label=None,
        return_fig=False, return_labels=False):
    """This will calculate and predict the sex of each sample.

inputs:
=======
    the "data_source" can be any one of:
        path -- to a folder of processed sample data (csv or pickled meth/unmeth dataframes)
        data_containers -- object created from methylprep.run_pipeline() or methylcheck.load(path, 'meth')
        tuple of (meth, unmeth) dataframes
    array_type (string)
        enum: {'27k','450k','epic','epic+','mouse'}
        if not specified, it will load the data from data_source and determine the array for you.
    median_cutoff
        the minimum difference in the medians of X and Y probe copy numbers to assign male or female
        (copied from the minfi sex predict function)
    include_probe_failure_percent:
        True: includes poobah failure percent per sample as a column in the output table and on the plot.
        Note: you must supply a 'path' as data_source to include poobah in plots.
    poobah_cutoff
        The maximum percent of sample probes that can fail before the sample fails.
        Default is 20 (percent). Has no effect if `include_probe_failure_percent` is False.
    plot
        True: creates a plot, with option to `save` as image or `return_fig`.
    return_fig
        If True, returns a pyplot figure instead of a dataframe. Default is False.
        Note: return_fig will not show a plot on screen.
    return_labels: (requires plot == True)
        When using poobah_cutoff, the plot only includes A-Z,1...N labels on samples to make it easier to read.
        To see which sample_ids these labels correspond to, you can rerun the function with return_labels=True
        and it will skip plotting and just return a dictionary with sample_ids and these labels,
        to embed in a PDF report if you like.
    custom_label:
        Option to provide a dictionary with keys (sample_ids) and values (labels) to add more data
        about samples to the multi-dimensional QC plot.

    While providing a filepath is the easiest way, you can also pass in a data_containers object,
    a list of data_containers containing raw meth/unmeth values, instead. This object is produced
    by methylprep.run_pipeline, or by using methylcheck.load(filepath, format='meth') and lets you
    customize the import if your files were not prepared using methylprep (non-standand CSV columns, for example).

    If a poobah_values.pkl file can be found in path, the dataframe returned will also include the
    percent of probes for X and Y chromosomes that failed quality control, and warn the user if any did.
    This feature won't work if a containers object or tuple of dataframes is passed in, instead of a path.

    Note: ~90% of Y probes should fail if the sample is female. That chromosome is missing."""
    allowed_array_types = {'27k','450k','epic','epic+','mouse'}
    try:
        from methylprep.files import Manifest
        from methylprep.models import ArrayType
    except ImportError:
        raise ImportError("This function requires methylprep to be installed (pip3 install `methylprep`)")

    (data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source)
    # data_source_type is one of {'path', 'container', 'control', 'meth_unmeth_tuple'}
    poobah = None
    if data_source_type in ('path'):
        # this will look for saved pickles first, or csvs, or parsing the containers (which are both slower)
        # the saved pickles function isn't working for batches yet.
        try:
            meth, unmeth = methylcheck.qc_plot._get_data(
                data_containers=None, path=data_source,
                compare=False, noob=False, verbose=False)
        except Exception as e:
            meth, unmeth = methylcheck.qc_plot._get_data(
                data_containers=None, path=data_source,
                compare=False, noob=True, verbose=False)
        if include_probe_failure_percent == True and Path(data_source, 'poobah_values.pkl').expanduser().exists():
            poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser())

    elif data_source_type in ('container'):
        # this will look through a list of data_containers and pull out the meth and unmeth values
        meth, unmeth = methylcheck.qc_plot._get_data(
            data_containers=data_source, path=None,
            compare=False, noob=False, verbose=False)

    elif data_source_type == 'meth_unmeth_tuple':
        (meth, unmeth) = data_source

    if len(meth) != len(unmeth):
        raise ValueError(f"WARNING: probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}")

    if array_type == None:
        # get list of X and Y probes - using manifest? why not .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here
        array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda))
    elif isinstance(array_type, str):
        if array_type in allowed_array_types:
            array_type = ArrayType(array_type)
        else:
            raise ValueError(f"Your array_type must be one of these: {allowed_array_types} or None.")

    if verbose:
        LOGGER.debug(array_type)
    LOGGER.setLevel(logging.WARNING)
    manifest = Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame # 'custom', '27k', '450k', 'epic', 'epic+', 'mouse'
    LOGGER.setLevel(logging.INFO)

    x_probes = manifest.index[manifest['CHR']=='X']
    y_probes = manifest.index[manifest['CHR']=='Y']
    if verbose:
        LOGGER.info(f"Found {len(x_probes)} X and {len(y_probes)} Y probes")

    # dataframes of meth and unmeth values for the sex chromosomes
    x_meth = meth[meth.index.isin(x_probes)]
    x_unmeth = unmeth[unmeth.index.isin(x_probes)]
    y_meth = meth[meth.index.isin(y_probes)]
    y_unmeth = unmeth[unmeth.index.isin(y_probes)]

    # create empty dataframe for output
    output = pd.DataFrame(index=[s for s in meth.columns], columns=['x_median','y_median','predicted_sex'])

    # get median values for each sex chromosome for each sample
    x_med = _get_copy_number(x_meth,x_unmeth).median()
    y_med = _get_copy_number(y_meth,y_unmeth).median()

    # fill output with values
    output['x_median'] = output.index.map(x_med)
    output['y_median'] = output.index.map(y_med)

    # compute difference
    median_difference = output['y_median'] - output['x_median']

    # use the median_cutoff (default -2) to assign each sample a predicted sex
    sex0 = ['F' if x < median_cutoff else 'M' for x in median_difference]

    # adding medians and predicted sex to dataframe with predicted sex
    output['predicted_sex'] = sex0
    output = output.round(1)

    # if poobah_df exists, calculate percent X and Y probes that failed
    sample_failure_percent = {} # % of ALL probes in sample, not just X or Y
    if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame):
        p_value_cutoff = 0.05
        X_col = []
        Y_col = []
        failed_samples = []
        for column in poobah.columns:
            sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index) / len(poobah.index),1)
            failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index
            failed_x_probe_names = list(set(failed_probe_names) & set(x_probes))
            failed_y_probe_names = list(set(failed_probe_names) & set(y_probes))
            X_percent = round(100*len(failed_x_probe_names)/len(x_probes),1)
            Y_percent = round(100*len(failed_y_probe_names)/len(y_probes),1)
            X_col.append(X_percent)
            Y_col.append(Y_percent)
            if X_percent > 10:
                failed_samples.append(column)
        output['X_fail_percent'] = X_col #output.index.map(X_col)
        output['Y_fail_percent'] = Y_col #output.index.map(Y_col)
        if failed_samples != []:
            LOGGER.warning(f"{len(failed_samples)} samples had >10% of X probes fail poobah. Predictions for these may be unreliable:")
            LOGGER.warning(f"{failed_samples}")

    if data_source_type in ('path'):
        output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output)

    if plot == True:
        fig = _plot_predicted_sex(data=output, # columns: x_median, y_median, predicted_sex, X_fail_percent, Y_fail_percent
            sample_failure_percent=sample_failure_percent,
            median_cutoff=median_cutoff,
            include_probe_failure_percent=include_probe_failure_percent,
            verbose=verbose,
            save=save,
            poobah_cutoff=poobah_cutoff,
            custom_label=custom_label,
            data_source_type=data_source_type,
            data_source=data_source,
            return_fig=return_fig,
            return_labels=return_labels,
            )
        if return_labels:
            return fig # these are a lookup dictionary of labels, not a figure
        if return_fig:
            return fig
    return output


def _plot_predicted_sex(data=pd.DataFrame(),
        sample_failure_percent={},
        median_cutoff= -2,
        include_probe_failure_percent=True,
        verbose=False,
        save=False,
        poobah_cutoff=20, #%
        custom_label=None,
        data_source_type=None,
        data_source=None,
        return_fig=False,
        return_labels=False):
    """
    data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent']
    - color is sex, pink or blue
    - marker circle size will be larger and more faded if poobah values are worse, smaller and darker if low variance. Like a probability cloud.
    - sample text is (ID, delta age)
    - sex mismatches are X, matches are circles (when the sample sheet lists an actual sex)
    - samples are labeled on the plot when failed
    - adds legend of sketchy samples and labels
    - show delta age on labels (using custom column dict)
    - unit tests with custom label and without, and check that controls_report still works with this function
    - save_fig
    - return_labels returns a label lookup dict instead of a plot; if a "custom_label" dict is passed in, keys must match the data DF index.
    """
    if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index):
        data['sample_failure_percent'] = pd.Series(sample_failure_percent)
    else:
        LOGGER.warning("sample_failure_percent index did not align with output data index")

    #sns.set_theme(style="white")
    show_mismatches = None if 'sex_matches' not in data.columns else "sex_matches"
    if show_mismatches:
        data["sex_matches"] = data["sex_matches"].map({0:"Mismatch", 1:"Match"})
    show_failure = None if 'sample_failure_percent' not in data.columns else "sample_failure_percent"
    sample_sizes = (20, 600)
    if show_failure:
        # avoid sizing dots with a narrow range; gives false impression of bad samples.
        poobah_range = data["sample_failure_percent"].max() - data["sample_failure_percent"].min()
        if poobah_range < poobah_cutoff/2:
            show_failure = None
            sample_sizes = (40, 40)

    custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7']))
    # if only one sex, make sure male is blue; female is pink
    # if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M') # (palette order handles this)
    fig = sns.relplot(data=data, x='x_median', y='y_median', hue="predicted_sex",
        size=show_failure, style=show_mismatches, sizes=sample_sizes,
        alpha=.5, palette=custom_palette, height=8, aspect=1.34)
    ax = fig.axes[0,0]
    fig.fig.subplots_adjust(top=.95)

    # spread the axes out if the medians are tightly clustered
    yscale = plt.gca().get_ylim()
    xscale = plt.gca().get_xlim()
    if abs(yscale[1]-yscale[0]) < 2.0:
        ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1)
        ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1)

    label_lookup = {index_val: (chr(i+65) if (i <= 26) else str(i-26)) for i, index_val in enumerate(data.index)}

    for idx, row in data.iterrows():
        if 'sample_failure_percent' in data.columns and row['sample_failure_percent'] > poobah_cutoff:
            label = f"{custom_label.get(idx)}" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx]
            ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred')
        else:
            label = f"{custom_label.get(idx)}" if isinstance(custom_label, dict) else None
            if label:
                ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='grey')

    if return_labels:
        plt.close() # release memory
        return label_lookup

    if 'sample_failure_percent' in data.columns:
        N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index)
        N_total = len(data['sample_failure_percent'].index)
        ax.set_title(f"{N_failed} of {N_total} samples failed poobah, with at least {poobah_cutoff}% of probes failing")
    else:
        ax.set_title("Predicted sex, based on median copy number of X and Y probes.")
    if save:
        filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser()
        plt.savefig(filepath, bbox_inches="tight")
    if return_fig:
        return fig
    plt.show()


def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output):
    """output is a dataframe with Sample_ID in the index. This adds an 'actual_sex' column (M or F)
    read from the sample sheet / meta data found in filepath, plus a 'sex_matches' column comparing
    it with the predicted sex. controls_report() does the same thing, and only calls get_sex() with a path."""
    # merge actual sex into processed output, if available
    file_patterns = {
        'sample_sheet_meta_data.pkl': 'meta',
        '*_meta_data.pkl': 'meta',
        '*samplesheet*.csv': 'meta',
        '*sample_sheet*.csv': 'meta',
    }
    loaded_files = {}
    for file_pattern in file_patterns:
        for filename in Path(filepath).expanduser().rglob(file_pattern):
            if '.pkl' in filename.suffixes:
                loaded_files['meta'] = pd.read_pickle(filename)
                break
            if '.csv' in filename.suffixes:
                loaded_files['meta'] = pd.read_csv(filename)
                break
    if len(loaded_files) == 1:
        # methylprep v1.5.4-6 was creating meta_data files with two Sample_ID columns. Check and fix here:
        # methylcheck 0.7.9 / prep 1.6.0 meta_data lacking Sample_ID when sample_sheet uses alt column names and gets replaced.
        if any(loaded_files['meta'].columns.duplicated()):
            loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()]
            LOGGER.info("Removed a duplicate Sample_ID column in samplesheet")
        if 'Sample_ID' in loaded_files['meta'].columns:
            loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
        elif 'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns:
            loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str) + '_' + loaded_files['meta']['Sentrix_Position'].astype(str)
            loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
        else:
            raise ValueError("Your sample sheet must have a Sample_ID column, or (Sentrix_ID and Sentrix_Position) columns.")

        # fixing the case of the sex/gender column, if needed
        renamed_column = None
        if ('Gender' in loaded_files['meta'].columns or 'Sex' in loaded_files['meta'].columns):
            if 'Gender' in loaded_files['meta'].columns:
                renamed_column = 'Gender'
            elif 'Sex' in loaded_files['meta'].columns:
                renamed_column = 'Sex'
        else:
            renamed_columns = {col: (col.title() if col.lower() in ('sex','gender') else col) for col in loaded_files['meta'].columns}
            loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns)
            if 'Gender' in renamed_columns.values():
                renamed_column = 'Gender'
            elif 'Sex' in renamed_columns.values():
                renamed_column = 'Sex'

        if renamed_column is not None:
            # next, normalize the values in this column to M/F; controls_report() does NOT do this step, but should.
            sex_values = set(loaded_files['meta'][renamed_column].unique())
            #print('sex_values', sex_values)
            if not sex_values.issubset(set(['M','F'])): # subset, because the set might only contain one sex
                if 'Male' in sex_values or 'Female' in sex_values:
                    loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'})
                elif 'male' in sex_values or 'female' in sex_values:
                    loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'})
                elif 'MALE' in sex_values or 'FEMALE' in sex_values:
                    loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'})
                elif 'm' in sex_values or 'f' in sex_values:
                    loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'})
                else:
                    LOGGER.warning(f"Could not compare predicted with actual sex, because actual sexes listed in your samplesheet are not understood (expecting M or F): (found {sex_values})")
                    output['actual_sex'] = None
                    output['sex_matches'] = None
                    return output
            output['actual_sex'] = None
            output['sex_matches'] = None
            for row in output.itertuples():
                try:
                    actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column))
                except KeyError:
                    if 'Sample_ID' in output.columns:
                        LOGGER.warning("Sample_ID appears to be a column in your output DataFrame; Set that to the index when you pass it in.")
                    raise KeyError("Could not read the actual sex from meta data to compare.")
                if isinstance(actual_sex, pd.Series):
                    LOGGER.warning(f"Multiple samples matched actual sex for {row.Index}; using the first one found.")
                    actual_sex = actual_sex[0]
                if hasattr(row, 'predicted_sex'):
                    sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0
                else:
                    sex_matches = np.nan
                output.loc[row.Index, 'actual_sex'] = actual_sex
                output.loc[row.Index, 'sex_matches'] = sex_matches
        else:
            pass # no Sex/Gender column in sample sheet
    return output
if any(loaded_files['meta'].columns.duplicated()): loaded_files['meta']", "sex based on matching X and Y probes.\") if save:", "samples. e.g. add more data about samples to the multi-dimensional", "2.0: ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1) ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1) label_lookup = {index_val: chr(i+65) if", "copy number. requires dataframes of methylated and unmethylated values. can", "if your files were not prepared using methylprep (non-standand CSV", "if \"sample_failure_percent\" in data.columns: N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index) N_total", "data_source and determine the array for you. median_cutoff the minimum", "save=False, poobah_cutoff=20, #% custom_label=None, data_source_type=None, data_source=None, return_fig=False, return_labels=False): \"\"\" data", "does NOT do this step, but should. sex_values = set(loaded_files['meta'][renamed_column].unique())", "to samples. e.g. add more data about samples to the", "were not prepared using methylprep (non-standand CSV columns, for example)", "<= 26) else str(i-26) for i,index_val in enumerate(data.index)} for idx,row", "median cutoff - can be manipulated by user --- default", "but work at -0.5. # populate dataframe with predicted sex", "pathlib import Path #app import methylcheck # uses .load; get_sex", "verbose: LOGGER.info(f\"Found {len(x_probes)} X and {len(y_probes)} Y probes\") # dataframes", "sketchy samples and labels - show delta age on labels", "probability cloud. - sample text is (ID, delta age) -", "color='grey') if return_labels: plt.close() # release memory return label_lookup if", "chr(i+65) if (i <= 26) else str(i-26) for i,index_val in", "in allowed_array_types: array_type = ArrayType(array_type) else: raise ValueError(f\"Your array_type must", "of X any Y probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA)", "from data_source and determine the array for you. median_cutoff the", "- return_labels, returns a lookup dict instead of plot if", "least {poobah_cutoff}% of probes failing\") else: ax.set_title(f\"Predicted sex based on", "== True and Path(data_source,'poobah_values.pkl').expanduser().exists(): poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser()) elif data_source_type in", "PDF report if you like. custom_label: Option to provide a", "narrow range; gives false impression of bad samples. poobah_range =", "range; gives false impression of bad samples. poobah_range = data[\"sample_failure_percent\"].max()", "ValueError(f\"Cannot compare with predicted sex because actual sexes listed in", "meth[meth.index.isin(y_probes)] y_unmeth = unmeth[unmeth.index.isin(y_probes)] # create empty dataframe for output", "!= len(unmeth): raise ValueError(f\"WARNING: probe count mismatch: meth {len(meth)} --", "if return_fig: return fig return output def _plot_predicted_sex(data=pd.DataFrame(), sample_failure_percent={}, median_cutoff=", "= list(set(failed_probe_names) & set(y_probes)) X_percent = round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(),1) Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1)", "column and returns it.\"\"\" # controls_report() does the same thing,", "unmeth) dataframes array_type (string) enum: {'27k','450k','epic','epic+','mouse'} if not specified, it", "v1.5.4-6 was creating meta_data files with two Sample_ID columns. 
Check", "user --- default = -2 --- used to predict sex", "load the data from data_source and determine the array for", "samplesheet pkl and poobah_values.pkl, if you want to compare predicted", "poobah_range < poobah_cutoff/2: show_failure = None sample_sizes = (40,40) custom_palette", "in, such as (actual_age - predicted_age), it simply adds those", "and poobah_values.pkl, if you want to compare predicted sex with", "output.itertuples(): try: actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column)) except KeyError: if 'Sample_ID' in", "folder also containing samplesheet pkl and poobah_values.pkl, if you want", "with return_labels=True and it will skip plotting and just return", "= {} # % of ALL probes in sample, not", "custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7'])) # if only one sex, make sure", "ValueError(\"Your sample sheet must have a Sample_ID column, or (Sentrix_ID", "larger and more faded if poobah values are worse, smaller", "# uses .load; get_sex uses methylprep models too and detect_array()", "== 'meth_unmeth_tuple': (meth, unmeth) = data_source if len(meth) != len(unmeth):", "returns it.\"\"\" # controls_report() does the same thing, and only", "include_probe_failure_percent=True, verbose=False, save=False, poobah_cutoff=20, #% custom_label=None, data_source_type=None, data_source=None, return_fig=False, return_labels=False):", "read actual sex from meta data to compare.\") if isinstance(actual_sex,", "tuple of dataframes is passed in, instead of a path.", "columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'] - color is sex,", "dataframes path -- to a folder also containing samplesheet pkl", "'unmeth_values.pkl' dataframes path -- to a folder also containing samplesheet", "labels to apply to samples. e.g. add more data about", "plot. Note: you must supply a 'path' as data_source to", "The maximum percent of sample probes that can fail before", "return_fig will not show a plot on screen. return_labels: (requires", "[] Y_col = [] failed_samples = [] for column in", "adds those this label to the marker text labels. 
Dicts", "if 'Sample_ID' in output.columns: LOGGER.warning(\"Sample_ID was another column in your", "be raw OR corrected\"\"\" # minfi R version: # log2(getMeth(object)", "else: pass # no Sex/Gender column found in samplesheet return", "column in samplesheet\") if 'Sample_ID' in loaded_files['meta'].columns: loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')", "Sentrix_Position) columns.\") # fixing case of the relevant column renamed_column", "skip plotting and just return a dictionary with sample_ids and", "in renamed_columns.values(): renamed_column = 'Sex' if renamed_column is not None:", "in enumerate(data.index)} for idx,row in data.iterrows(): if \"sample_failure_percent\" in row", "- predicted_age), it simply adds those this label to the", "f\"{custom_label.get(idx)}\" if isinstance(custom_label, dict) else None if label: ax.text(row['x_median']+0.05, row['y_median']+0.05,", "data[\"sample_failure_percent\"].min() if poobah_range < poobah_cutoff/2: show_failure = None sample_sizes =", "or 'Female' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'}) elif 'male'", "and set(output.actual_sex) == set('M') # if first value to be", "yscale = plt.gca().get_ylim() xscale = plt.gca().get_xlim() if abs(yscale[1]-yscale[0]) < 2.0:", "dataframes of meth and unmeth values for the sex chromosomes", "predicted sex because actual sexes listed in your samplesheet are", "elif 'Sex' in renamed_columns.values(): renamed_column = 'Sex' if renamed_column is", "def _plot_predicted_sex(data=pd.DataFrame(), sample_failure_percent={}, median_cutoff= -2, include_probe_failure_percent=True, verbose=False, save=False, poobah_cutoff=20, #%", "still works with this function - save_fig - return_labels, returns", "data.columns: N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index) N_total = len(data['sample_failure_percent'].index) ax.set_title(f\"{N_failed}", "isinstance(actual_sex, pd.Series): LOGGER.warning(f\"Multiple samples matched actual sex for {row.Index}, because", "unmeth[unmeth.index.isin(x_probes)] y_meth = meth[meth.index.isin(y_probes)] y_unmeth = unmeth[unmeth.index.isin(y_probes)] # create empty", "len(meth) != len(unmeth): raise ValueError(f\"WARNING: probe count mismatch: meth {len(meth)}", "include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame): p_value_cutoff = 0.05 X_col", "Y_col = [] failed_samples = [] for column in poobah.columns:", "of ALL probes in sample, not just X or Y", "not read actual sex from meta data to compare.\") if", "two Sample_ID columns. Check and fix here: # methylcheck 0.7.9", "{N_total} samples failed poobah, with at least {poobah_cutoff}% of probes", "for saved pickles first, then csvs or parsing the containers", "sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'}) elif 'm' in sex_values or", "# 'custom', '27k', '450k', 'epic', 'epic+' LOGGER.setLevel(logging.INFO) x_probes = manifest.index[manifest['CHR']=='X']", "= actual_sex output.loc[row.Index, 'sex_matches'] = sex_matches else: pass # no", "filepath is the easiest way, you can also pass in", "\"\"\"output is a dataframe with Sample_ID in the index. 
This", "DataFrame; Set that to the index when you pass it", "'*sample_sheet*.csv': 'meta', } loaded_files = {} for file_pattern in file_patterns:", "- sex mismatches are X, matched samples are circles (if", "p_value_cutoff].index failed_x_probe_names = list(set(failed_probe_names) & set(x_probes)) failed_y_probe_names = list(set(failed_probe_names) &", "return_fig: return fig return output def _plot_predicted_sex(data=pd.DataFrame(), sample_failure_percent={}, median_cutoff= -2,", "if you want to compare predicted sex with actual sex.", "try: from methylprep.files import Manifest from methylprep.models import ArrayType except", "return_labels=False): \"\"\" data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'] -", "# % of ALL probes in sample, not just X", "'meth_unmeth_tuple'} poobah=None if data_source_type in ('path'): # this will look", "without, and check that controls_report still works with this function", "loaded_files['meta'].set_index('Sample_ID') elif 'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns: loaded_files['meta']['Sample_ID']", "loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID') else: raise ValueError(\"Your sample sheet must have", "must be one of these: {allowed_array_types} or None.\") if verbose:", "when using -2, but work at -0.5. # populate dataframe", "female is pink # if hasattr(output, 'actual_sex') and set(output.actual_sex) ==", "into processed output, if available file_patterns = { 'sample_sheet_meta_data.pkl': 'meta',", "- show delta age on labels (using custom column dict)", "sex_values or 'Female' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'}) elif", "can fail before the sample fails. Default is 20 (percent)", "{'27k','450k','epic','epic+','mouse'} try: from methylprep.files import Manifest from methylprep.models import ArrayType", "predicted sex with actual sex. data_containers -- object created from", "loaded_files['meta'].columns: renamed_column = 'Gender' elif 'Sex' in loaded_files['meta'].columns: renamed_column =", "not be accurate.\") actual_sex = actual_sex[0] if hasattr(row,'predicted_sex'): sex_matches =", "in data.columns else \"sample_failure_percent\" sample_sizes = (20, 600) if show_failure:", "probe copy numbers to assign male or female (copied from", "the min scale to be at least 2 units. yscale", "else 'M' for x in median_difference] # NOTE for testing:", "= -2 --- used to predict sex sex0 = ['F'", "= data[\"sample_failure_percent\"].max() - data[\"sample_failure_percent\"].min() if poobah_range < poobah_cutoff/2: show_failure =", "passed in, such as (actual_age - predicted_age), it simply adds", "be plotted is male, change palette if hasattr(data, 'predicted_sex') and", "KeyError(\"Could not read actual sex from meta data to compare.\")", "the function with return_labels=True and it will skip plotting and", "with sample_ids and these labels, to embed in a PDF", "unmeth) = data_source if len(meth) != len(unmeth): raise ValueError(f\"WARNING: probe", "X and {len(y_probes)} Y probes\") # dataframes of meth and", "with actual sex. data_containers -- object created from methylprep.run_pipeline() or", "= sex_matches else: pass # no Sex/Gender column found in", "at least 2 units. 
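# Illustrative example (toy values; probe and sample names below are placeholders):
# the copy-number value is simply log2(meth + unmeth) per probe, so a probe with meth=3000 and
# unmeth=1000 intensity units yields log2(4000) ~= 11.97. get_sex() below takes the per-sample
# median of these values over the X and Y probes and predicts sex from the difference.
#
#   import pandas as pd
#   meth   = pd.DataFrame({'Sample1': [3000.0]}, index=['cg0001'])
#   unmeth = pd.DataFrame({'Sample1': [1000.0]}, index=['cg0001'])
#   _get_copy_number(meth, unmeth).loc['cg0001', 'Sample1']   # ~11.97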
def get_sex(data_source, array_type=None, verbose=False, plot=False, save=False, on_lambda=False,
            median_cutoff= -2, include_probe_failure_percent=True, poobah_cutoff=20, custom_label=None,
            return_fig=False, return_labels=False):
    """This will calculate and predict the sex of each sample.

inputs:
=======
    the "data_source" can be any one of:
        path -- to a folder with csv data that contains processed sample data
        path -- to a folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes
        path -- to a folder also containing samplesheet pkl and poobah_values.pkl, if you want to
            compare predicted sex with actual sex.
        data_containers -- object created from methylprep.run_pipeline() or methylcheck.load(path, 'meth')
        tuple of (meth, unmeth) dataframes

    array_type (string)
        enum: {'27k','450k','epic','epic+','mouse'}
        if not specified, it will load the data from data_source and determine the array for you.

    median_cutoff
        the minimum difference in the medians of X and Y probe copy numbers to assign male or female
        (copied from the minfi sex predict function)

    include_probe_failure_percent:
        True: includes poobah percent per sample as column in the output table and on the plot.
        Note: you must supply a 'path' as data_source to include poobah in plots.

    poobah_cutoff
        The maximum percent of sample probes that can fail before the sample fails.
        Default is 20 (percent). Has no effect if `include_probe_failure_percent` is False.

    plot
        True: creates a plot, with option to `save` as image or `return_fig`.

    save
        True: saves the plot, if plot is True

    return_fig
        If True, returns a pyplot figure instead of a dataframe. Default is False.
        Note: return_fig will not show a plot on screen.

    return_labels: (requires plot == True)
        When using poobah_cutoff, the figure only includes A-Z,1...N labels on samples on plot to make
        it easier to read. So to get what sample_ids these labels correspond to, you can rerun the
        function with return_labels=True and it will skip plotting and just return a dictionary with
        sample_ids and these labels, to embed in a PDF report if you like.

    custom_label:
        Option to provide a dictionary with keys as sample_ids and values as labels to apply to samples.
        e.g. add more data about samples to the multi-dimensional QC plot

    While providing a filepath is the easiest way, you can also pass in a data_containers object, a list
    of data_containers containing raw meth/unmeth values, instead. This object is produced by
    methylprep.run_pipeline, or by using methylcheck.load(filepath, format='meth') and lets you customize
    the import if your files were not prepared using methylprep (non-standard CSV columns, for example).

    If a `poobah_values.pkl` file can be found in path, the dataframe returned will also include percent
    of probes for X and Y chromosomes that failed quality control, and warn the user if any did. This
    feature won't work if a containers object or tuple of dataframes is passed in, instead of a path.

    Note: ~90% of Y probes should fail if the sample is female. That chromosome is missing."""
    allowed_array_types = {'27k','450k','epic','epic+','mouse'}
    try:
        from methylprep.files import Manifest
        from methylprep.models import ArrayType
    except ImportError:
        raise ImportError("This function requires methylprep to be installed (pip3 install `methylprep`)")

    (data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source)
    # data_source_type is one of {'path', 'container', 'control', 'meth_unmeth_tuple'}
    poobah = None
    if data_source_type in ('path'):
        # this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
        # the saved pickles function isn't working for batches yet.
        try:
            meth, unmeth = methylcheck.qc_plot._get_data(
                data_containers=None, path=data_source, compare=False, noob=False, verbose=False)
        except Exception as e:
            meth, unmeth = methylcheck.qc_plot._get_data(
                data_containers=None, path=data_source, compare=False, noob=True, verbose=False)
        if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists():
            poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser())

    elif data_source_type in ('container'):
        # this loads the minimum of data to be fast, because these are already loaded.
        # Just passes in meth/unmeth data.
        # Sample sheet should have 'M' or 'F' in column to compare with predicted sex.
        meth, unmeth = methylcheck.qc_plot._get_data(
            data_containers=data_source, path=None, compare=False, noob=False, verbose=False)

    elif data_source_type == 'meth_unmeth_tuple':
        (meth, unmeth) = data_source

    if len(meth) != len(unmeth):
        raise ValueError(f"WARNING: probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}")

    if array_type == None:
        # get list of X any Y probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here
        array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda))
    elif isinstance(array_type, str):
        if array_type in allowed_array_types:
            array_type = ArrayType(array_type)
        else:
            raise ValueError(f"Your array_type must be one of these: {allowed_array_types} or None.")

    if verbose:
        LOGGER.debug(array_type)
    LOGGER.setLevel(logging.WARNING)
    manifest = Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame # 'custom', '27k', '450k', 'epic', 'epic+'
    LOGGER.setLevel(logging.INFO)
    x_probes = manifest.index[manifest['CHR']=='X']
    y_probes = manifest.index[manifest['CHR']=='Y']
    if verbose:
        LOGGER.info(f"Found {len(x_probes)} X and {len(y_probes)} Y probes")

    # dataframes of meth and unmeth values for the sex chromosomes
    x_meth = meth[meth.index.isin(x_probes)]
    x_unmeth = unmeth[unmeth.index.isin(x_probes)]
    y_meth = meth[meth.index.isin(y_probes)]
    y_unmeth = unmeth[unmeth.index.isin(y_probes)]

    # create empty dataframe for output
    output = pd.DataFrame(index=[s for s in meth.columns], columns=['x_median','y_median','predicted_sex'])

    # get median values for each sex chromosome for each sample
    x_med = _get_copy_number(x_meth, x_unmeth).median()
    y_med = _get_copy_number(y_meth, y_unmeth).median()

    # populate output dataframe with values
    output['x_median'] = output.index.map(x_med)
    output['y_median'] = output.index.map(y_med)

    # compute difference
    median_difference = output['y_median'] - output['x_median']

    # median cutoff - can be manipulated by user --- default = -2 --- used to predict sex
    sex0 = ['F' if x < median_cutoff else 'M' for x in median_difference]
    # NOTE for testing: GSE85566/GPL13534 (N=120) has 4 samples that are predicted as wrong sex
    # when using -2, but work at -0.5.

    # populate dataframe with predicted sex
    output['predicted_sex'] = sex0
    output = output.round(1)

    # if poobah_df exists, calculate percent of X and Y probes per sample that failed
    sample_failure_percent = {} # % of ALL probes in sample, not just X or Y
    if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame):
        p_value_cutoff = 0.05
        X_col = []
        Y_col = []
        failed_samples = []
        for column in poobah.columns:
            sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index) / len(poobah.index), 1)
            failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index
            failed_x_probe_names = list(set(failed_probe_names) & set(x_probes))
            failed_y_probe_names = list(set(failed_probe_names) & set(y_probes))
            X_percent = round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(), 1)
            Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(), 1)
            X_col.append(X_percent)
            Y_col.append(Y_percent)
            if X_percent > 10:
                failed_samples.append(column)
        output['X_fail_percent'] = X_col #output.index.map(X_col)
        output['Y_fail_percent'] = Y_col #output.index.map(Y_col)
        if failed_samples != []:
            LOGGER.warning(f"{len(failed_samples)} samples had >10% of X probes fail p-value probe detection. Predictions for these may be unreliable:")
            LOGGER.warning(f"{failed_samples}")

    if data_source_type in ('path'):
        output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output)

    if plot == True:
        fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'
            sample_failure_percent=sample_failure_percent,
            median_cutoff=median_cutoff,
            include_probe_failure_percent=include_probe_failure_percent,
            verbose=verbose,
            save=save,
            poobah_cutoff=poobah_cutoff,
            custom_label=custom_label,
            data_source_type=data_source_type,
            data_source=data_source,
            return_fig=return_fig,
            return_labels=return_labels,
        )
        if return_labels:
            return fig # these are a lookup dictionary of labels
        if return_fig:
            return fig
    return output
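# Usage sketch (illustrative only; the folder path is a placeholder for a methylprep-processed output
# directory containing meth_values.pkl / unmeth_values.pkl and, optionally, poobah_values.pkl plus a
# samplesheet pickle for the actual-sex comparison). Recent methylcheck releases also expose this
# function as methylcheck.get_sex; otherwise import it from this module.
#
#   sexes = get_sex('~/my_processed_idats/', plot=True, save=True)
#   print(sexes[['x_median', 'y_median', 'predicted_sex']].head())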
def _plot_predicted_sex(data=pd.DataFrame(), sample_failure_percent={}, median_cutoff= -2,
    include_probe_failure_percent=True, verbose=False, save=False, poobah_cutoff=20, #%
    custom_label=None, data_source_type=None, data_source=None, return_fig=False, return_labels=False):
    """
    data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent']
    - color is sex, pink or blue
    - marker circle size will be larger and more faded if poobah values are worse, smaller and darker
      if low variance. Like a probability cloud.
    - sample text is (ID, delta age)
    - sex mismatches are X, matched samples are circles (if samplesheet contains actual sex data)
    - omits labels for samples that have LOW failure rates, but shows IDs when failed
    - adds legend of sketchy samples and labels
    - show delta age on labels (using custom column dict)
    - unit tests with custom label and without, and check that controls_report still works with this function
    - save_fig
    - return_labels, returns a lookup dict instead of plot
    If there is a "custom_label" dict passed in, such as (actual_age - predicted_age), it simply adds
    this label to the marker text labels. Dicts must match the data DF index.
    """
    if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index):
        data['sample_failure_percent'] = pd.Series(sample_failure_percent)
    else:
        LOGGER.warning("sample_failure_percent index did not align with output data index")

    #sns.set_theme(style="white")
    show_mismatches = None if 'sex_matches' not in data.columns else "sex_matches"
    if show_mismatches:
        data["sex_matches"] = data["sex_matches"].map({0:"Mismatch", 1:"Match"})
    show_failure = None if 'sample_failure_percent' not in data.columns else "sample_failure_percent"
    sample_sizes = (20, 600)
    if show_failure: # avoid sizing dots with narrow range; gives false impression of bad samples.
        poobah_range = data["sample_failure_percent"].max() - data["sample_failure_percent"].min()
        if poobah_range < poobah_cutoff/2:
            show_failure = None
            sample_sizes = (40,40)
    custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7']))
    # if only one sex, make sure male is blue; female is pink
    # if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M')
    # if first value to be plotted is male, change palette
    if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M':
        custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89']))
    fig = sns.relplot(data=data, x='x_median', y='y_median', hue="predicted_sex", size=show_failure,
        style=show_mismatches, sizes=sample_sizes, alpha=.5, palette=custom_palette, height=8, aspect=1.34)
    ax = fig.axes[0,0]
    fig.fig.subplots_adjust(top=.95)

    # for zoomed-in plots with few points close together, set the min scale to be at least 2 units.
    yscale = plt.gca().get_ylim()
    xscale = plt.gca().get_xlim()
    if abs(yscale[1]-yscale[0]) < 2.0:
        ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1)
        ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1)

    label_lookup = {index_val: chr(i+65) if (i <= 26) else str(i-26) for i,index_val in enumerate(data.index)}
    for idx,row in data.iterrows():
        if "sample_failure_percent" in row and row['sample_failure_percent'] > poobah_cutoff:
            label = f"{label_lookup[idx]}, {custom_label.get(idx)}" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx]
            ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred')
        else:
            label = f"{custom_label.get(idx)}" if isinstance(custom_label, dict) else None
            if label:
                ax.text(row['x_median']+0.05, row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey')

    if return_labels:
        plt.close() # release memory
        return label_lookup

    if "sample_failure_percent" in data.columns:
        N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index)
        N_total = len(data['sample_failure_percent'].index)
        ax.set_title(f"{N_failed} of {N_total} samples failed poobah, with at least {poobah_cutoff}% of probes failing")
    else:
        ax.set_title(f"Predicted sex based on matching X and Y probes.")
    if save:
        filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser()
        plt.savefig(filepath, bbox_inches="tight")
    if return_fig:
        return fig
    plt.show()
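# Illustrative note (path and sample IDs below are placeholders): when a plot labels samples A-Z / 1..N
# because they exceeded poobah_cutoff, the same get_sex() call can be repeated with return_labels=True
# to map those letters back to sample IDs.
#
#   labels = get_sex('~/my_processed_idats/', plot=True, return_labels=True)
#   # labels is a dict like {'201868590193_R01C01': 'A', '201868590193_R02C01': 'B', ...}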
def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output):
    """output is a dataframe with Sample_ID in the index.
    This adds actual_sex as a column and returns it."""
    # controls_report() does the same thing, and only needs this when comparing predicted with actual sex.
    # merge actual sex into processed output, if available
    file_patterns = {
        'sample_sheet_meta_data.pkl': 'meta',
        '*_meta_data.pkl': 'meta',
        '*samplesheet*.csv': 'meta',
        '*sample_sheet*.csv': 'meta',
    }
    loaded_files = {}
    for file_pattern in file_patterns:
        for filename in Path(filepath).expanduser().rglob(file_pattern):
            if '.pkl' in filename.suffixes:
                loaded_files['meta'] = pd.read_pickle(filename)
                break
            if '.csv' in filename.suffixes:
                loaded_files['meta'] = pd.read_csv(filename)
                break
    if len(loaded_files) == 1:
        # methylprep v1.5.4-6 was creating meta_data files with two Sample_ID columns. Check and fix here:
        # methylcheck 0.7.9 / prep 1.6.0 meta_data lacking Sample_ID when sample_sheet uses alt column names and gets replaced.
        if any(loaded_files['meta'].columns.duplicated()):
            loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()]
            LOGGER.info("Removed a duplicate Sample_ID column in samplesheet")
        if 'Sample_ID' in loaded_files['meta'].columns:
            loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
        elif 'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns:
            loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str) + '_' + loaded_files['meta']['Sentrix_Position'].astype(str)
            loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
        else:
            raise ValueError("Your sample sheet must have a Sample_ID column, or (Sentrix_ID and Sentrix_Position) columns.")

        # fixing case of the relevant column
        renamed_column = None
        if ('Gender' in loaded_files['meta'].columns or 'Sex' in loaded_files['meta'].columns):
            if 'Gender' in loaded_files['meta'].columns:
                renamed_column = 'Gender'
            elif 'Sex' in loaded_files['meta'].columns:
                renamed_column = 'Sex'
        else:
            renamed_columns = {col:(col.title() if col.lower() in ('sex','gender') else col) for col in loaded_files['meta'].columns}
            loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns)
            if 'Gender' in renamed_columns.values():
                renamed_column = 'Gender'
            elif 'Sex' in renamed_columns.values():
                renamed_column = 'Sex'

        if renamed_column is not None:
            # next, ensure samplesheet Sex/Gender (Male/Female) are recoded as M/F;
            # controls_report() does NOT do this step, but should.
            sex_values = set(loaded_files['meta'][renamed_column].unique())
            #print('sex_values', sex_values)
            if not sex_values.issubset(set(['M','F'])):
                # subset, because samples might only contain one sex
                if 'Male' in sex_values or 'Female' in sex_values:
                    loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'})
                elif 'male' in sex_values or 'female' in sex_values:
                    loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'})
                elif 'MALE' in sex_values or 'FEMALE' in sex_values:
                    loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'})
                elif 'm' in sex_values or 'f' in sex_values:
                    loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'})
                else:
                    raise ValueError(f"Cannot compare with predicted sex because actual sexes listed in your samplesheet are not understood (expecting M or F): (found {sex_values})")
            output['actual_sex'] = None
            output['sex_matches'] = None
            for row in output.itertuples():
                try:
                    actual_sex = loaded_files['meta'].loc[row.Index].get(renamed_column)
                except KeyError:
                    if 'Sample_ID' in output.columns:
                        LOGGER.warning("Sample_ID was another column in your output DataFrame; Set that to the index when you pass it in.")
                    raise KeyError("Could not read actual sex from meta data to compare.")
                if isinstance(actual_sex, pd.Series):
                    LOGGER.warning(f"Multiple samples matched actual sex for {row.Index}, because Sample_ID repeats in sample sheets. Only using first match, so matches may not be accurate.")
                    actual_sex = actual_sex.iloc[0]
                # convert to string only after the duplicate (Series) check, so repeats are caught
                actual_sex = str(actual_sex)
                if hasattr(row, 'predicted_sex'):
                    sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0
                else:
                    sex_matches = np.nan
                output.loc[row.Index, 'actual_sex'] = actual_sex
                output.loc[row.Index, 'sex_matches'] = sex_matches
        else:
            pass # no Sex/Gender column found in samplesheet
    return output
inputs: ======= the \"data_source\" can be any", "of: path -- to a folder with csv data that", "example) If a `poobah_values.pkl` file can be found in path,", ".methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here array_type = ArrayType(methylcheck.detect_array(meth,", "len(poobah.index),1) failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index failed_x_probe_names = list(set(failed_probe_names) &", "one sex if 'Male' in sex_values or 'Female' in sex_values:", "= f\"{label_lookup[idx]}, {custom_label.get(idx)}\" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx]", "Sex/Gender (Male/Female) are recoded as M/F; controls_report() does NOT do", "label = f\"{label_lookup[idx]}, {custom_label.get(idx)}\" if isinstance(custom_label, dict) and custom_label.get(idx) else", "a dictionary with sample_ids and these labels, to embed in", "values as labels to apply to samples. e.g. add more", "warn the user if any did. This feature won't work", "compare=False, noob=False, verbose=False) except Exception as e: meth, unmeth =", "for col in loaded_files['meta'].columns} loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns) if 'Gender' in", "return_labels: (requires plot == True) When using poobah_cutoff, the figure", "(20, 600) if show_failure: # avoid sizing dots with narrow", "missing.\"\"\" allowed_array_types = {'27k','450k','epic','epic+','mouse'} try: from methylprep.files import Manifest from", "compare=False, noob=True, verbose=False) if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists(): poobah", "or blue - marker circle size will be larger and", "at least {poobah_cutoff}% of probes failing\") else: ax.set_title(f\"Predicted sex based", "provide a dictionary with keys as sample_ids and values as", "unmeth {len(unmeth)}\") if array_type == None: # get list of", "set(sample_failure_percent.keys()) == set(data.index): data['sample_failure_percent'] = pd.Series(sample_failure_percent) else: LOGGER.warning(\"sample_failure_percent index did", "LOGGER.info(f\"Found {len(x_probes)} X and {len(y_probes)} Y probes\") # dataframes of", "list of data_containers containing raw meth/unmeth values, instead. This object", "containing raw meth/unmeth values, instead. This object is produced by", "verbose=False) if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists(): poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser())", "in loaded_files['meta'].columns): if 'Gender' in loaded_files['meta'].columns: renamed_column = 'Gender' elif", "data DF index. 
\"\"\" if sample_failure_percent != {} and set(sample_failure_percent.keys())", "these labels, to embed in a PDF report if you", "files were not prepared using methylprep (non-standand CSV columns, for", "path, the dataframe returned will also include percent of probes", "not sex_values.issubset(set(['M','F'])): # subset, because samples might only contain one", "'M': custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89'])) fig = sns.relplot(data=data, x='x_median', y='y_median', hue=\"predicted_sex\",", "plot, if plot is True return_fig If True, returns a", "column renamed_column = None if ('Gender' in loaded_files['meta'].columns or 'Sex'", "if plot == True: fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median',", "if not sex_values.issubset(set(['M','F'])): # subset, because samples might only contain", "is pink # if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M')", "get median values for each sex chromosome for each sample", "relevant column renamed_column = None if ('Gender' in loaded_files['meta'].columns or", "and check that controls_report still works with this function -", "len(data[data['sample_failure_percent'] > poobah_cutoff].index) N_total = len(data['sample_failure_percent'].index) ax.set_title(f\"{N_failed} of {N_total} samples", "- omits labels for samples that have LOW failure rates,", "-- to a folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes", "{len(x_probes)} X and {len(y_probes)} Y probes\") # dataframes of meth", "for samples that have LOW failure rates, but shows IDs", "these may be unreliable:\") LOGGER.warning(f\"{failed_samples}\") if data_source_type in ('path'): output", "'450k', 'epic', 'epic+' LOGGER.setLevel(logging.INFO) x_probes = manifest.index[manifest['CHR']=='X'] y_probes = manifest.index[manifest['CHR']=='Y']", "= [] Y_col = [] failed_samples = [] for column", "and on the plot. Note: you must supply a 'path'", "> poobah_cutoff].index) N_total = len(data['sample_failure_percent'].index) ax.set_title(f\"{N_failed} of {N_total} samples failed", "to get what sample_ids these labels correspond to, you can", "= [] for column in poobah.columns: sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >=", "if x < median_cutoff else 'M' for x in median_difference]", "(meth, unmeth) dataframes array_type (string) enum: {'27k','450k','epic','epic+','mouse'} if not specified,", "rerun the function with return_labels=True and it will skip plotting", "X and Y probe copy numbers to assign male or", "round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(),1) Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1) X_col.append(X_percent) Y_col.append(Y_percent) if X_percent > 10:", "sex if 'Male' in sex_values or 'Female' in sex_values: loaded_files['meta'][renamed_column]", "data_source_type=data_source_type, data_source=data_source, return_fig=return_fig, return_labels=return_labels, ) if return_labels: return fig #", "label to the marker text labels. Dicts must match the", "median_cutoff else 'M' for x in median_difference] # NOTE for", "no effect if `include_probe_failure_percent` is False. plot True: creates a", "the sex of each sample. 
inputs: ======= the \"data_source\" can", "noob=True, verbose=False) if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists(): poobah =", "#% custom_label=None, data_source_type=None, data_source=None, return_fig=False, return_labels=False): \"\"\" data columns: ['x_median',", "sex output['predicted_sex'] = sex0 output = output.round(1) # if poobah_df", "- data[\"sample_failure_percent\"].min() if poobah_range < poobah_cutoff/2: show_failure = None sample_sizes", "in data.iterrows(): if \"sample_failure_percent\" in row and row['sample_failure_percent'] > poobah_cutoff:", "loaded_files['meta'].rename(columns=renamed_columns) if 'Gender' in renamed_columns.values(): renamed_column = 'Gender' elif 'Sex'", "'Gender' in loaded_files['meta'].columns: renamed_column = 'Gender' elif 'Sex' in loaded_files['meta'].columns:", "to read. So to get what sample_ids these labels correspond", "fail if the sample is female. That chromosome is missing.\"\"\"", "np import matplotlib.pyplot as plt import seaborn as sns from", "or methylcheck.load(path, 'meth') tuple of (meth, unmeth) dataframes array_type (string)", "using -2, but work at -0.5. # populate dataframe with", "one of: path -- to a folder with csv data", "created from methylprep.run_pipeline() or methylcheck.load(path, 'meth') tuple of (meth, unmeth)", "So to get what sample_ids these labels correspond to, you", "slower) # the saved pickles function isn't working for batches", "saved pickles function isn't working for batches yet. meth, unmeth", "'X_fail_percent', 'Y_fail_percent'] - color is sex, pink or blue -", "= pd.read_pickle(filename) break if '.csv' in filename.suffixes: loaded_files['meta'] = pd.read_csv(filename)", "pd.Series): LOGGER.warning(f\"Multiple samples matched actual sex for {row.Index}, because Sample_ID", "output, if available file_patterns = { 'sample_sheet_meta_data.pkl': 'meta', '*_meta_data.pkl': 'meta',", "renamed_columns.values(): renamed_column = 'Sex' if renamed_column is not None: #", "else label_lookup[idx] ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred') else: label", "path. Note: ~90% of Y probes should fail if the", "X or Y if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame):", "age) - sex mismatches are X, matched samples are circles", "the sample fails. Default is 20 (percent) Has no effect", "x < median_cutoff else 'M' for x in median_difference] #", "False. plot True: creates a plot, with option to `save`", "sample_ids and these labels, to embed in a PDF report", "# get median values for each sex chromosome for each", "meta data to compare.\") if isinstance(actual_sex, pd.Series): LOGGER.warning(f\"Multiple samples matched", "- color is sex, pink or blue - marker circle", "MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda)) elif", "= 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0 else: sex_matches", "fontsize=10, color='grey') if return_labels: plt.close() # release memory return label_lookup", "alt column names and gets replaced. 
if any(loaded_files['meta'].columns.duplicated()): loaded_files['meta'] =", "containers (which are both slower) # the saved pickles function", "from meta data to compare.\") if isinstance(actual_sex, pd.Series): LOGGER.warning(f\"Multiple samples", ".load; get_sex uses methylprep models too and detect_array() import logging", "def get_sex(data_source, array_type=None, verbose=False, plot=False, save=False, on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True,", "percent of sample probes that can fail before the sample", "can also pass in a data_containers object, a list of", "using first match, so matches may not be accurate.\") actual_sex", "CSV columns, for example) If a `poobah_values.pkl` file can be", "{len(unmeth)}\") if array_type == None: # get list of X", "'.pkl' in filename.suffixes: loaded_files['meta'] = pd.read_pickle(filename) break if '.csv' in", "if '.csv' in filename.suffixes: loaded_files['meta'] = pd.read_csv(filename) break if len(loaded_files)", "labels for samples that have LOW failure rates, but shows", "customize the import if your files were not prepared using", "median_cutoff= -2, include_probe_failure_percent=True, verbose=False, save=False, poobah_cutoff=20, #% custom_label=None, data_source_type=None, data_source=None,", "is a \"custom_label\" dict passed in, such as (actual_age -", "'male' in sex_values or 'female' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M',", "'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'] - color is sex, pink or", "on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True, poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False): \"\"\"This will", "samples and labels - show delta age on labels (using", "N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index) N_total = len(data['sample_failure_percent'].index) ax.set_title(f\"{N_failed} of", "Sample_ID column, or (Sentrix_ID and Sentrix_Position) columns.\") # fixing case", "loaded_files['meta'].columns} loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns) if 'Gender' in renamed_columns.values(): renamed_column =", "(meth, unmeth) = data_source if len(meth) != len(unmeth): raise ValueError(f\"WARNING:", "contain one sex if 'Male' in sex_values or 'Female' in", "predict the sex of each sample. inputs: ======= the \"data_source\"", "to return copy number. requires dataframes of methylated and unmethylated", "sample sheet must have a Sample_ID column, or (Sentrix_ID and", "in median_difference] # NOTE for testing: GSE85566/GPL13534 (N=120) has 4", "pickles function isn't working for batches yet. meth, unmeth =", "here array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda)) elif isinstance(array_type,str): if array_type in", "a folder also containing samplesheet pkl and poobah_values.pkl, if you", "'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser() plt.savefig(filepath, bbox_inches=\"tight\") if", "pass it in.\") raise KeyError(\"Could not read actual sex from", "pickles function isn't working for batches yet. 
try: meth, unmeth", "- adds legend of sketchy samples and labels - show", "subset, because samples might only contain one sex if 'Male'", "probes\") # dataframes of meth and unmeth values for the", "and Y probe copy numbers to assign male or female", "with at least {poobah_cutoff}% of probes failing\") else: ax.set_title(f\"Predicted sex", "already loaded. Just passes in meth/unmeth data # Sample sheet", "loaded_files['meta']['Sentrix_Position'].astype(str) loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID') else: raise ValueError(\"Your sample sheet must", ">= p_value_cutoff].index) / len(poobah.index),1) failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index failed_x_probe_names", "in output.columns: LOGGER.warning(\"Sample_ID was another column in your output DataFrame;", "= output.round(1) # if poobah_df exists, calculate percent X and", "(data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source) # data_source_type is one of {'path',", "a probability cloud. - sample text is (ID, delta age)", "loaded_files['meta'].columns or 'Sex' in loaded_files['meta'].columns): if 'Gender' in loaded_files['meta'].columns: renamed_column", "= _get_copy_number(x_meth,x_unmeth).median() y_med = _get_copy_number(y_meth,y_unmeth).median() # populate output dataframe with", "f\"{label_lookup[idx]}, {custom_label.get(idx)}\" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx] ax.text(row['x_median'],", "labels on samples on plot to make it easier to", "merge actual sex into processed output, if available file_patterns =", "import if your files were not prepared using methylprep (non-standand", "x_unmeth = unmeth[unmeth.index.isin(x_probes)] y_meth = meth[meth.index.isin(y_probes)] y_unmeth = unmeth[unmeth.index.isin(y_probes)] #", "be one of these: {allowed_array_types} or None.\") if verbose: LOGGER.debug(array_type)", "fig plt.show() def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output): \"\"\"output is a dataframe with", "actual sex. data_containers -- object created from methylprep.run_pipeline() or methylcheck.load(path,", "from pathlib import Path #app import methylcheck # uses .load;", "(40,40) custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7'])) # if only one sex, make", "repeats in sample sheets. Only using first match, so matches", "lets you customize the import if your files were not", "percent X and Y probes that failed sample_failure_percent = {}", "sex from meta data to compare.\") if isinstance(actual_sex, pd.Series): LOGGER.warning(f\"Multiple", "one of {'path', 'container', 'control', 'meth_unmeth_tuple'} poobah=None if data_source_type in", "raise ValueError(f\"Your array_type must be one of these: {allowed_array_types} or", "if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx] ax.text(row['x_median'], row['y_median'], label,", "loaded_files['meta'].set_index('Sample_ID') else: raise ValueError(\"Your sample sheet must have a Sample_ID", "predicted sex. # merge actual sex into processed output, if", "pd.read_csv(filename) break if len(loaded_files) == 1: # methylprep v1.5.4-6 was", "= loaded_files['meta'].set_index('Sample_ID') else: raise ValueError(\"Your sample sheet must have a", "yet. 
meth, unmeth = methylcheck.qc_plot._get_data( data_containers=data_source, path=None, compare=False, noob=False, verbose=False)", "'*_meta_data.pkl': 'meta', '*samplesheet*.csv': 'meta', '*sample_sheet*.csv': 'meta', } loaded_files = {}", "height=8, aspect=1.34) ax = fig.axes[0,0] fig.fig.subplots_adjust(top=.95) # for zoomed-in plots", "folder with csv data that contains processed sample data path", "['F' if x < median_cutoff else 'M' for x in", "in meth/unmeth data # Sample sheet should have 'M' or", "probes.\") if save: filepath = 'predicted_sexes.png' if data_source_type != 'path'", "in row and row['sample_failure_percent'] > poobah_cutoff: label = f\"{label_lookup[idx]}, {custom_label.get(idx)}\"", "if (i <= 26) else str(i-26) for i,index_val in enumerate(data.index)}", "filename.suffixes: loaded_files['meta'] = pd.read_pickle(filename) break if '.csv' in filename.suffixes: loaded_files['meta']", "at -0.5. # populate dataframe with predicted sex output['predicted_sex'] =", "Note: return_fig will not show a plot on screen. return_labels:", "def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output): \"\"\"output is a dataframe with Sample_ID in", "column to match predicted sex. # merge actual sex into", "unmeth = methylcheck.qc_plot._get_data( data_containers=data_source, path=None, compare=False, noob=False, verbose=False) elif data_source_type", "= loaded_files['meta'].rename(columns=renamed_columns) if 'Gender' in renamed_columns.values(): renamed_column = 'Gender' elif", "corrected\"\"\" # minfi R version: # log2(getMeth(object) + getUnmeth(object)) return", "and fix here: # methylcheck 0.7.9 / prep 1.6.0 meta_data", "returns a lookup dict instead of plot if there is", "can be manipulated by user --- default = -2 ---", "output['X_fail_percent'] = X_col #output.index.map(X_col) output['Y_fail_percent'] = Y_col #output.index.map(Y_col) if failed_samples", "# fixing case of the relevant column renamed_column = None", "align with output data index\") #sns.set_theme(style=\"white\") show_mismatches = None if", "in, instead of a path. Note: ~90% of Y probes", "report if you like. custom_label: Option to provide a dictionary", "can be found in path, the dataframe returned will also", "index. \"\"\" if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index):", "or F): (found {sex_values})\") output['actual_sex'] = None output['sex_matches'] = None", "import Path #app import methylcheck # uses .load; get_sex uses", "= (20, 600) if show_failure: # avoid sizing dots with", "set(data.index): data['sample_failure_percent'] = pd.Series(sample_failure_percent) else: LOGGER.warning(\"sample_failure_percent index did not align", "plot == True) When using poobah_cutoff, the figure only includes", "show_mismatches: data[\"sex_matches\"] = data[\"sex_matches\"].map({0:\"Mismatch\", 1:\"Match\"}) show_failure = None if 'sample_failure_percent'", "match the data DF index. \"\"\" if sample_failure_percent != {}", "include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists(): poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser()) elif data_source_type", "GSE85566/GPL13534 (N=120) has 4 samples that are predicted as wrong", "'meth_values.pkl' and 'unmeth_values.pkl' dataframes path -- to a folder also", "Y probes should fail if the sample is female. 
That", "(string) enum: {'27k','450k','epic','epic+','mouse'} if not specified, it will load the", "None sample_sizes = (40,40) custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7'])) # if only", "X_col #output.index.map(X_col) output['Y_fail_percent'] = Y_col #output.index.map(Y_col) if failed_samples != []:", "was creating meta_data files with two Sample_ID columns. Check and", "This feature won't work if a containers object or tuple", "<reponame>FoxoTech/methylcheck<filename>methylcheck/predict/sex.py import pandas as pd import numpy as np import", "samples to the multi-dimensional QC plot while providing a filepath", "= ['F' if x < median_cutoff else 'M' for x", "!= {} and set(sample_failure_percent.keys()) == set(data.index): data['sample_failure_percent'] = pd.Series(sample_failure_percent) else:", "control, and warn the user if any did. This feature", "output['x_median'] = output.index.map(x_med) output['y_median'] = output.index.map(y_med) # compute difference median_difference", "filename.suffixes: loaded_files['meta'] = pd.read_csv(filename) break if len(loaded_files) == 1: #", "are worse, smaller and darker if low variance. Like a", "[] for column in poobah.columns: sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index)", "can be raw OR corrected\"\"\" # minfi R version: #", "these: {allowed_array_types} or None.\") if verbose: LOGGER.debug(array_type) LOGGER.setLevel(logging.WARNING) manifest =", "= None if 'sex_matches' not in data.columns else \"sex_matches\" if", "data_source_type in ('path'): # this will look for saved pickles", "(which are both slower) # the saved pickles function isn't", "assign male or female (copied from the minfi sex predict", "failed_samples = [] for column in poobah.columns: sample_failure_percent[column] = round(100*len(poobah[column][poobah[column]", "circles (if samplesheet contains actual sex data) - omits labels", "failed poobah, with at least {poobah_cutoff}% of probes failing\") else:", "data_containers object, a list of data_containers containing raw meth/unmeth values,", "values, instead. 
This object is produced by methylprep.run_pipeline, or by", "sex predict function) include_probe_failure_percent: True: includes poobah percent per sample", "get list of X any Y probes - using .methylprep_manifest_files", "= methylcheck.qc_plot._get_data( data_containers=None, path=data_source, compare=False, noob=True, verbose=False) if include_probe_failure_percent ==", "blue; female is pink # if hasattr(output, 'actual_sex') and set(output.actual_sex)", "file_pattern in file_patterns: for filename in Path(filepath).expanduser().rglob(file_pattern): if '.pkl' in", "in ('path'): output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output) if plot == True:", "_plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent' sample_failure_percent=sample_failure_percent, median_cutoff=median_cutoff, include_probe_failure_percent=include_probe_failure_percent,", "Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame # 'custom', '27k', '450k', 'epic', 'epic+' LOGGER.setLevel(logging.INFO)", "manipulated by user --- default = -2 --- used to", "sample_sizes = (20, 600) if show_failure: # avoid sizing dots", "sex_values or 'FEMALE' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'}) elif", "chromosomes that failed quality control, and warn the user if", "'Female':'F'}) elif 'male' in sex_values or 'female' in sex_values: loaded_files['meta'][renamed_column]", "True: creates a plot, with option to `save` as image", "output['predicted_sex'] = sex0 output = output.round(1) # if poobah_df exists,", "function - save_fig - return_labels, returns a lookup dict instead", "'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns: loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str)", "as a column and returns it.\"\"\" # controls_report() does the", "'F' in column to match predicted sex. # merge actual", "= unmeth[unmeth.index.isin(y_probes)] # create empty dataframe for output output =", "these are a lookup dictionary of labels if return_fig: return", "predicted_age), it simply adds those this label to the marker", "-- object created from methylprep.run_pipeline() or methylcheck.load(path, 'meth') tuple of", "fails. 
Default is 20 (percent) Has no effect if `include_probe_failure_percent`", "for testing: GSE85566/GPL13534 (N=120) has 4 samples that are predicted", "'path' else Path(data_source,'predicted_sexes.png').expanduser() plt.savefig(filepath, bbox_inches=\"tight\") if return_fig: return fig plt.show()", "path -- to a folder also containing samplesheet pkl and", "as pd import numpy as np import matplotlib.pyplot as plt", "failed_x_probe_names = list(set(failed_probe_names) & set(x_probes)) failed_y_probe_names = list(set(failed_probe_names) & set(y_probes))", "xscale = plt.gca().get_xlim() if abs(yscale[1]-yscale[0]) < 2.0: ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1) ax.set_ylim(ymin=yscale[0]-1,", "plt import seaborn as sns from pathlib import Path #app", "X any Y probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and", "using poobah_cutoff, the figure only includes A-Z,1...N labels on samples", "uses methylprep models too and detect_array() import logging LOGGER =", "= loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()] LOGGER.info(\"Removed a duplicate Sample_ID column in samplesheet\")", "in loaded_files['meta'].columns: loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str) + '_' + loaded_files['meta']['Sentrix_Position'].astype(str) loaded_files['meta']", "csvs or parsing the containers (which are both slower) #", "LOGGER.info(\"Removed a duplicate Sample_ID column in samplesheet\") if 'Sample_ID' in", "manifest.index[manifest['CHR']=='Y'] if verbose: LOGGER.info(f\"Found {len(x_probes)} X and {len(y_probes)} Y probes\")", "= methylcheck.qc_plot._get_data( data_containers=None, path=data_source, compare=False, noob=False, verbose=False) except Exception as", "path -- to a folder with the 'meth_values.pkl' and 'unmeth_values.pkl'", "ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda)) elif isinstance(array_type,str): if array_type in allowed_array_types: array_type =", "that controls_report still works with this function - save_fig -", "1:\"Match\"}) show_failure = None if 'sample_failure_percent' not in data.columns else", "quality control, and warn the user if any did. This", "DF index. \"\"\" if sample_failure_percent != {} and set(sample_failure_percent.keys()) ==", "in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'}) elif 'm' in sex_values", "10: failed_samples.append(column) output['X_fail_percent'] = X_col #output.index.map(X_col) output['Y_fail_percent'] = Y_col #output.index.map(Y_col)", "not understood (expecting M or F): (found {sex_values})\") output['actual_sex'] =", "logging.basicConfig(level=logging.INFO) def _get_copy_number(meth,unmeth): \"\"\"function to return copy number. requires dataframes", "the saved pickles function isn't working for batches yet. meth,", "you can rerun the function with return_labels=True and it will", "dataframe for output output = pd.DataFrame(index=[s for s in meth.columns],", "in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'}) elif 'MALE' in sex_values", "populate dataframe with predicted sex output['predicted_sex'] = sex0 output =", "chromosome for each sample x_med = _get_copy_number(x_meth,x_unmeth).median() y_med = _get_copy_number(y_meth,y_unmeth).median()", "chromosome is missing.\"\"\" allowed_array_types = {'27k','450k','epic','epic+','mouse'} try: from methylprep.files import", "the data DF index. 
\"\"\" if sample_failure_percent != {} and", "if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame): p_value_cutoff = 0.05", "get_sex uses methylprep models too and detect_array() import logging LOGGER", "methylcheck.qc_plot._get_data( data_containers=None, path=data_source, compare=False, noob=True, verbose=False) if include_probe_failure_percent == True", "else Path(data_source,'predicted_sexes.png').expanduser() plt.savefig(filepath, bbox_inches=\"tight\") if return_fig: return fig plt.show() def", "the user if any did. This feature won't work if", "accurate.\") actual_sex = actual_sex[0] if hasattr(row,'predicted_sex'): sex_matches = 1 if", "A-Z,1...N labels on samples on plot to make it easier", "> poobah_cutoff: label = f\"{label_lookup[idx]}, {custom_label.get(idx)}\" if isinstance(custom_label, dict) and", "meta_data files with two Sample_ID columns. Check and fix here:", "x_med = _get_copy_number(x_meth,x_unmeth).median() y_med = _get_copy_number(y_meth,y_unmeth).median() # populate output dataframe", "is male, change palette if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] ==", "str(i-26) for i,index_val in enumerate(data.index)} for idx,row in data.iterrows(): if", "dots with narrow range; gives false impression of bad samples.", "plot is True return_fig If True, returns a pyplot figure", "Option to provide a dictionary with keys as sample_ids and", "output def _plot_predicted_sex(data=pd.DataFrame(), sample_failure_percent={}, median_cutoff= -2, include_probe_failure_percent=True, verbose=False, save=False, poobah_cutoff=20,", "sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'}) elif 'male' in sex_values or", "meth/unmeth data # Sample sheet should have 'M' or 'F'", "and auto-detected array here array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda)) elif isinstance(array_type,str):", "if poobah values are worse, smaller and darker if low", "poobah_cutoff=poobah_cutoff, custom_label=custom_label, data_source_type=data_source_type, data_source=data_source, return_fig=return_fig, return_labels=return_labels, ) if return_labels: return", "when you pass it in.\") raise KeyError(\"Could not read actual", "'Sex' if renamed_column is not None: # next, ensure samplesheet", "else: raise ValueError(f\"Cannot compare with predicted sex because actual sexes", "with keys as sample_ids and values as labels to apply", "by using methylcheck.load(filepath, format='meth') and lets you customize the import", "if data_source_type in ('path'): output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output) if plot", "calculate percent X and Y probes that failed sample_failure_percent =", "custom_label=None, data_source_type=None, data_source=None, return_fig=False, return_labels=False): \"\"\" data columns: ['x_median', 'y_median',", "or by using methylcheck.load(filepath, format='meth') and lets you customize the", "show delta age on labels (using custom column dict) -", "= loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'}) elif 'm' in sex_values or 'f' in", "LOGGER.warning(f\"{failed_samples}\") if data_source_type in ('path'): output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output) if", "actual_sex output.loc[row.Index, 'sex_matches'] = sex_matches else: pass # no Sex/Gender", "if array_type == None: # get list of X any", "elif isinstance(array_type,str): if array_type in allowed_array_types: array_type = 
ArrayType(array_type) else:", "with option to `save` as image or `return_fig`. save True:", "sample fails. Default is 20 (percent) Has no effect if", "object or tuple of dataframes is passed in, instead of", "meth {len(meth)} -- unmeth {len(unmeth)}\") if array_type == None: #", "sex, make sure male is blue; female is pink #", "of these: {allowed_array_types} or None.\") if verbose: LOGGER.debug(array_type) LOGGER.setLevel(logging.WARNING) manifest", "has 4 samples that are predicted as wrong sex when", "pd import numpy as np import matplotlib.pyplot as plt import", "saves the plot, if plot is True return_fig If True,", "data_containers=None, path=data_source, compare=False, noob=True, verbose=False) if include_probe_failure_percent == True and", "if isinstance(custom_label, dict) else None if label: ax.text(row['x_median']+0.05, row['y_median']+0.05, label,", "sheets. Only using first match, so matches may not be", "fig return output def _plot_predicted_sex(data=pd.DataFrame(), sample_failure_percent={}, median_cutoff= -2, include_probe_failure_percent=True, verbose=False,", "not None: # next, ensure samplesheet Sex/Gender (Male/Female) are recoded", "figure instead of a dataframe. Default is False. Note: return_fig", "#print('sex_values', sex_values) if not sex_values.issubset(set(['M','F'])): # subset, because samples might", "to make it easier to read. So to get what", "'custom', '27k', '450k', 'epic', 'epic+' LOGGER.setLevel(logging.INFO) x_probes = manifest.index[manifest['CHR']=='X'] y_probes", "'FEMALE':'F'}) elif 'm' in sex_values or 'f' in sex_values: loaded_files['meta'][renamed_column]", "return fig # these are a lookup dictionary of labels", "to compare.\") if isinstance(actual_sex, pd.Series): LOGGER.warning(f\"Multiple samples matched actual sex", "parsing the containers (which are both slower) # the saved", "custom_label: Option to provide a dictionary with keys as sample_ids", "= methylcheck.load_processed._data_source_type(data_source) # data_source_type is one of {'path', 'container', 'control',", "return_labels, returns a lookup dict instead of plot if there", "= len(data['sample_failure_percent'].index) ax.set_title(f\"{N_failed} of {N_total} samples failed poobah, with at", "for file_pattern in file_patterns: for filename in Path(filepath).expanduser().rglob(file_pattern): if '.pkl'", "'Gender' elif 'Sex' in loaded_files['meta'].columns: renamed_column = 'Sex' else: renamed_columns", "exists, calculate percent X and Y probes that failed sample_failure_percent", "== 'M': custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89'])) fig = sns.relplot(data=data, x='x_median', y='y_median',", ") if return_labels: return fig # these are a lookup", "object, a list of data_containers containing raw meth/unmeth values, instead.", "sample x_med = _get_copy_number(x_meth,x_unmeth).median() y_med = _get_copy_number(y_meth,y_unmeth).median() # populate output", "verbose=False, plot=False, save=False, on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True, poobah_cutoff=20, custom_label=None, return_fig=False,", "next, ensure samplesheet Sex/Gender (Male/Female) are recoded as M/F; controls_report()", "faded if poobah values are worse, smaller and darker if", "return_labels=True and it will skip plotting and just return a", "list of X any Y probes - using .methylprep_manifest_files (or", "600) if show_failure: # avoid sizing dots with narrow range;", "sample sheets. 
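A minimal usage sketch (added for illustration, not part of the original module). It assumes methylcheck exposes get_sex at the package level and that 'GSE00000_processed' is a hypothetical folder produced by methylprep.run_pipeline, containing meth_values.pkl / unmeth_values.pkl (and optionally poobah_values.pkl plus a samplesheet):

# usage sketch -- the folder name is a placeholder; any methylprep output folder should work the same way
from pathlib import Path
import methylcheck

processed_dir = Path('GSE00000_processed')                # hypothetical path (assumption)
sex_df = methylcheck.get_sex(processed_dir, plot=False)   # one row per sample
print(sex_df[['x_median', 'y_median', 'predicted_sex']])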
def _plot_predicted_sex(data=pd.DataFrame(),
        sample_failure_percent={},
        median_cutoff= -2,
        include_probe_failure_percent=True,
        verbose=False,
        save=False,
        poobah_cutoff=20, #%
        custom_label=None,
        data_source_type=None,
        data_source=None,
        return_fig=False,
        return_labels=False):
    """
    data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent']
    - color is sex, pink or blue
    - marker circle size will be larger and more faded if poobah values are worse,
      smaller and darker if low variance. Like a probability cloud.
    - sample text is (ID, delta age)
    - sex mismatches are X, matched samples are circles (if samplesheet contains actual sex data)
    - omits labels for samples that have LOW failure rates, but shows IDs when failed
        - adds legend of sketchy samples and labels
    - show delta age on labels (using custom column dict)
    - unit tests with custom label and without, and check that controls_report still works with this function
        - save_fig
        - return_labels, returns a lookup dict instead of plot

    if there is a "custom_label" dict passed in, such as (actual_age - predicted_age), it simply adds
    this label to the marker text labels. Dicts must match the data DF index.
    """
    if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index):
        data['sample_failure_percent'] = pd.Series(sample_failure_percent)
    else:
        LOGGER.warning("sample_failure_percent index did not align with output data index")

    #sns.set_theme(style="white")
    show_mismatches = None if 'sex_matches' not in data.columns else "sex_matches"
    if show_mismatches:
        data["sex_matches"] = data["sex_matches"].map({0: "Mismatch", 1: "Match"})
    show_failure = None if 'sample_failure_percent' not in data.columns else "sample_failure_percent"
    sample_sizes = (20, 600)
    if show_failure:
        # avoid sizing dots with narrow range; gives false impression of bad samples.
        poobah_range = data["sample_failure_percent"].max() - data["sample_failure_percent"].min()
        if poobah_range < poobah_cutoff/2:
            show_failure = None
            sample_sizes = (40, 40)

    custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7']))
    # if only one sex, make sure male is blue; female is pink
    #if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M')
    # if first value to be plotted is male, change palette
    if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M':
        custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89']))

    fig = sns.relplot(data=data, x='x_median', y='y_median', hue="predicted_sex",
        size=show_failure, style=show_mismatches, sizes=sample_sizes,
        alpha=.5, palette=custom_palette, height=8, aspect=1.34)
    ax = fig.axes[0,0]
    fig.fig.subplots_adjust(top=.95)

    # for zoomed-in plots with few points close together, set the min scale to be at least 2 units.
    yscale = plt.gca().get_ylim()
    xscale = plt.gca().get_xlim()
    if abs(yscale[1]-yscale[0]) < 2.0:
        ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1)
        ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1)

    label_lookup = {index_val: chr(i+65) if (i <= 26) else str(i-26) for i,index_val in enumerate(data.index)}
    for idx,row in data.iterrows():
        if "sample_failure_percent" in row and row['sample_failure_percent'] > poobah_cutoff:
            label = f"{label_lookup[idx]}, {custom_label.get(idx)}" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx]
            ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred')
        else:
            label = f"{custom_label.get(idx)}" if isinstance(custom_label, dict) else None
            if label:
                ax.text(row['x_median']+0.05, row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey')

    if return_labels:
        plt.close() # release memory
        return label_lookup

    if "sample_failure_percent" in data.columns:
        N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index)
        N_total = len(data['sample_failure_percent'].index)
        ax.set_title(f"{N_failed} of {N_total} samples failed poobah, with at least {poobah_cutoff}% of probes failing")
    else:
        ax.set_title(f"Predicted sex based on matching X and Y probes.")

    if save:
        filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser()
        plt.savefig(filepath, bbox_inches="tight")
    if return_fig:
        return fig
    plt.show()
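A brief, hedged illustration of recovering the plot's letter labels: per the docstring above, re-calling get_sex with return_labels=True returns the sample_id-to-label lookup instead of drawing, and the dictionary can be inverted for reporting. The folder name is the same hypothetical one used in the earlier sketch.

# hedged sketch: find which sample each plot label (A, B, C, ...) refers to
import methylcheck

labels = methylcheck.get_sex('GSE00000_processed', plot=True, return_labels=True)  # {sample_id: 'A', ...}
label_to_sample = {label: sample_id for sample_id, label in labels.items()}
print(label_to_sample)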
yscale =", "saved pickles first, then csvs or parsing the containers (which", "label, horizontalalignment='center', fontsize=10, color='darkred') else: label = f\"{custom_label.get(idx)}\" if isinstance(custom_label,", "samples. poobah_range = data[\"sample_failure_percent\"].max() - data[\"sample_failure_percent\"].min() if poobah_range < poobah_cutoff/2:", "your files were not prepared using methylprep (non-standand CSV columns,", "Only using first match, so matches may not be accurate.\")", "will look for saved pickles first, then csvs or parsing", "in loaded_files['meta'].columns: loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID') elif 'Sentrix_ID' in loaded_files['meta'].columns and", "of X and Y probe copy numbers to assign male", "True: fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'", "str(loaded_files['meta'].loc[row.Index].get(renamed_column)) except KeyError: if 'Sample_ID' in output.columns: LOGGER.warning(\"Sample_ID was another", "minimum of data to be fast, because these are already", "passes in meth/unmeth data # Sample sheet should have 'M'", "if return_fig: return fig plt.show() def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output): \"\"\"output is", "to match predicted sex. # merge actual sex into processed", "{allowed_array_types} or None.\") if verbose: LOGGER.debug(array_type) LOGGER.setLevel(logging.WARNING) manifest = Manifest(array_type,", "include poobah in plots. poobah_cutoff The maximum percent of sample", "these are already loaded. Just passes in meth/unmeth data #", "y_probes = manifest.index[manifest['CHR']=='Y'] if verbose: LOGGER.info(f\"Found {len(x_probes)} X and {len(y_probes)}", "samplesheet Sex/Gender (Male/Female) are recoded as M/F; controls_report() does NOT", "cutoff - can be manipulated by user --- default =", "None: # get list of X any Y probes -", "raise ValueError(f\"WARNING: probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}\")", "from methylprep.run_pipeline() or methylcheck.load(path, 'meth') tuple of (meth, unmeth) dataframes", "or 'f' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'}) else: raise", "pkl and poobah_values.pkl, if you want to compare predicted sex", "IDs when failed - adds legend of sketchy samples and", "plt.savefig(filepath, bbox_inches=\"tight\") if return_fig: return fig plt.show() def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output):", "0 else: sex_matches = np.nan output.loc[row.Index, 'actual_sex'] = actual_sex output.loc[row.Index,", "contains actual sex data) - omits labels for samples that", "i,index_val in enumerate(data.index)} for idx,row in data.iterrows(): if \"sample_failure_percent\" in", "'X_fail_percent', 'Y_fail_percent' sample_failure_percent=sample_failure_percent, median_cutoff=median_cutoff, include_probe_failure_percent=include_probe_failure_percent, verbose=verbose, save=save, poobah_cutoff=poobah_cutoff, custom_label=custom_label, data_source_type=data_source_type,", "- using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here array_type", "for column in poobah.columns: sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index) /", "plotted is male, change palette if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0]", "if include_probe_failure_percent == True and 
Path(data_source,'poobah_values.pkl').expanduser().exists(): poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser()) elif", "in ('sex','gender') else col) for col in loaded_files['meta'].columns} loaded_files['meta'] =", "= 'Sex' else: renamed_columns = {col:(col.title() if col.lower() in ('sex','gender')", "sex_values) if not sex_values.issubset(set(['M','F'])): # subset, because samples might only", "methylprep (non-standand CSV columns, for example) If a `poobah_values.pkl` file", "label_lookup if \"sample_failure_percent\" in data.columns: N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index)", "in loaded_files['meta'].columns} loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns) if 'Gender' in renamed_columns.values(): renamed_column", "< 2.0: ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1) ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1) label_lookup = {index_val: chr(i+65)", "failure rates, but shows IDs when failed - adds legend", "-0.5. # populate dataframe with predicted sex output['predicted_sex'] = sex0", "will be larger and more faded if poobah values are", "not in data.columns else \"sample_failure_percent\" sample_sizes = (20, 600) if", "renamed_column = 'Sex' if renamed_column is not None: # next,", "sex into processed output, if available file_patterns = { 'sample_sheet_meta_data.pkl':", "[]: LOGGER.warning(f\"{len(failed_samples)} samples had >10% of X probes fail p-value", "processed output, if available file_patterns = { 'sample_sheet_meta_data.pkl': 'meta', '*_meta_data.pkl':", "ValueError(f\"WARNING: probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}\") if", "ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1) label_lookup = {index_val: chr(i+65) if (i <= 26)", "meth, unmeth = methylcheck.qc_plot._get_data( data_containers=None, path=data_source, compare=False, noob=True, verbose=False) if", "meth, unmeth = methylcheck.qc_plot._get_data( data_containers=data_source, path=None, compare=False, noob=False, verbose=False) elif", "QC plot while providing a filepath is the easiest way,", "ValueError(f\"Your array_type must be one of these: {allowed_array_types} or None.\")", "# if first value to be plotted is male, change", "poobah_cutoff=20, #% custom_label=None, data_source_type=None, data_source=None, return_fig=False, return_labels=False): \"\"\" data columns:", "dict instead of plot if there is a \"custom_label\" dict", "if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index): data['sample_failure_percent'] =", "'sex_matches'] = sex_matches else: pass # no Sex/Gender column found", "and Y probes that failed sample_failure_percent = {} # %", "2 units. yscale = plt.gca().get_ylim() xscale = plt.gca().get_xlim() if abs(yscale[1]-yscale[0])", "e.g. 
add more data about samples to the multi-dimensional QC", "renamed_column is not None: # next, ensure samplesheet Sex/Gender (Male/Female)", "# 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent' sample_failure_percent=sample_failure_percent, median_cutoff=median_cutoff, include_probe_failure_percent=include_probe_failure_percent, verbose=verbose,", "X and Y chromosomes that failed quality control, and warn", "/ len(poobah.index),1) failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index failed_x_probe_names = list(set(failed_probe_names)", "unreliable:\") LOGGER.warning(f\"{failed_samples}\") if data_source_type in ('path'): output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output)", "methylcheck 0.7.9 / prep 1.6.0 meta_data lacking Sample_ID when sample_sheet", "= _get_copy_number(y_meth,y_unmeth).median() # populate output dataframe with values output['x_median'] =", "on matching X and Y probes.\") if save: filepath =", "plot == True: fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex',", "`include_probe_failure_percent` is False. plot True: creates a plot, with option", "sex_values.issubset(set(['M','F'])): # subset, because samples might only contain one sex", "row in output.itertuples(): try: actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column)) except KeyError: if", "embed in a PDF report if you like. custom_label: Option", "col) for col in loaded_files['meta'].columns} loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns) if 'Gender'", "'Male' in sex_values or 'Female' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M',", "it easier to read. So to get what sample_ids these", "== True and isinstance(poobah, pd.DataFrame): p_value_cutoff = 0.05 X_col =", "copy numbers to assign male or female (copied from the", "enumerate(data.index)} for idx,row in data.iterrows(): if \"sample_failure_percent\" in row and", "if return_labels: return fig # these are a lookup dictionary", "minfi sex predict function) include_probe_failure_percent: True: includes poobah percent per", "are recoded as M/F; controls_report() does NOT do this step,", "'predicted_sex') and list(data.predicted_sex)[0] == 'M': custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89'])) fig =", "sheet should have 'M' or 'F' in column to match", "poobah_range = data[\"sample_failure_percent\"].max() - data[\"sample_failure_percent\"].min() if poobah_range < poobah_cutoff/2: show_failure", "list(data.predicted_sex)[0] == 'M': custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89'])) fig = sns.relplot(data=data, x='x_median',", "sex_values or 'female' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'}) elif", "Y probes that failed sample_failure_percent = {} # % of", "probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}\") if array_type", "If True, returns a pyplot figure instead of a dataframe.", "None.\") if verbose: LOGGER.debug(array_type) LOGGER.setLevel(logging.WARNING) manifest = Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame", "only includes A-Z,1...N labels on samples on plot to make", "it.\"\"\" # controls_report() does the same thing, and only calls", "True and Path(data_source,'poobah_values.pkl').expanduser().exists(): poobah = 
pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser()) elif data_source_type in ('container'):", "'Y_fail_percent' sample_failure_percent=sample_failure_percent, median_cutoff=median_cutoff, include_probe_failure_percent=include_probe_failure_percent, verbose=verbose, save=save, poobah_cutoff=poobah_cutoff, custom_label=custom_label, data_source_type=data_source_type, data_source=data_source,", "output['y_median'] = output.index.map(y_med) # compute difference median_difference = output['y_median'] -", "return_fig=False, return_labels=False): \"\"\"This will calculate and predict the sex of", "probes fail p-value probe detection. Predictions for these may be", "return_labels: return fig # these are a lookup dictionary of", "allowed_array_types: array_type = ArrayType(array_type) else: raise ValueError(f\"Your array_type must be", "fixing case of the relevant column renamed_column = None if", "you must supply a 'path' as data_source to include poobah", "None for row in output.itertuples(): try: actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column)) except", "= meth[meth.index.isin(x_probes)] x_unmeth = unmeth[unmeth.index.isin(x_probes)] y_meth = meth[meth.index.isin(y_probes)] y_unmeth =", "elif 'Sex' in loaded_files['meta'].columns: renamed_column = 'Sex' else: renamed_columns =", "if ('Gender' in loaded_files['meta'].columns or 'Sex' in loaded_files['meta'].columns): if 'Gender'", "easier to read. So to get what sample_ids these labels", "dictionary with keys as sample_ids and values as labels to", "row and row['sample_failure_percent'] > poobah_cutoff: label = f\"{label_lookup[idx]}, {custom_label.get(idx)}\" if", "- save_fig - return_labels, returns a lookup dict instead of", "poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False): \"\"\"This will calculate and predict the", "import pandas as pd import numpy as np import matplotlib.pyplot", "in column to match predicted sex. # merge actual sex", "percent per sample as column in the output table and", "duplicate Sample_ID column in samplesheet\") if 'Sample_ID' in loaded_files['meta'].columns: loaded_files['meta']", "have 'M' or 'F' in column to match predicted sex.", "meth/unmeth values, instead. This object is produced by methylprep.run_pipeline, or", "folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes path -- to", "a pyplot figure instead of a dataframe. Default is False.", "dataframe with values output['x_median'] = output.index.map(x_med) output['y_median'] = output.index.map(y_med) #", "are not understood (expecting M or F): (found {sex_values})\") output['actual_sex']", "methylcheck.qc_plot._get_data( data_containers=None, path=data_source, compare=False, noob=False, verbose=False) except Exception as e:", "data.columns else \"sex_matches\" if show_mismatches: data[\"sex_matches\"] = data[\"sex_matches\"].map({0:\"Mismatch\", 1:\"Match\"}) show_failure", "with the minimum of data to be fast, because these", "pink or blue - marker circle size will be larger", "data that contains processed sample data path -- to a", "poobah in plots. 
poobah_cutoff The maximum percent of sample probes", "= ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda)) elif isinstance(array_type,str): if array_type in allowed_array_types: array_type", "return output def _plot_predicted_sex(data=pd.DataFrame(), sample_failure_percent={}, median_cutoff= -2, include_probe_failure_percent=True, verbose=False, save=False,", "% of ALL probes in sample, not just X or", "!= []: LOGGER.warning(f\"{len(failed_samples)} samples had >10% of X probes fail", "noob=False, verbose=False) elif data_source_type == 'meth_unmeth_tuple': (meth, unmeth) = data_source", "not specified, it will load the data from data_source and", "plot on screen. return_labels: (requires plot == True) When using", "failing\") else: ax.set_title(f\"Predicted sex based on matching X and Y", "idx,row in data.iterrows(): if \"sample_failure_percent\" in row and row['sample_failure_percent'] >", "multi-dimensional QC plot while providing a filepath is the easiest", "If a `poobah_values.pkl` file can be found in path, the", "def _get_copy_number(meth,unmeth): \"\"\"function to return copy number. requires dataframes of", "and predict the sex of each sample. inputs: ======= the", "be any one of: path -- to a folder with", "if 'Gender' in loaded_files['meta'].columns: renamed_column = 'Gender' elif 'Sex' in", "of X probes fail p-value probe detection. Predictions for these", "in a PDF report if you like. custom_label: Option to", "'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent' sample_failure_percent=sample_failure_percent, median_cutoff=median_cutoff, include_probe_failure_percent=include_probe_failure_percent, verbose=verbose, save=save, poobah_cutoff=poobah_cutoff,", "points close together, set the min scale to be at", "= poobah[column][poobah[column] >= p_value_cutoff].index failed_x_probe_names = list(set(failed_probe_names) & set(x_probes)) failed_y_probe_names", "plot while providing a filepath is the easiest way, you", "if hasattr(row,'predicted_sex'): sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else", "least 2 units. yscale = plt.gca().get_ylim() xscale = plt.gca().get_xlim() if", "only one sex, make sure male is blue; female is", "plot to make it easier to read. So to get", "# for zoomed-in plots with few points close together, set", "loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'}) elif 'm' in sex_values or 'f' in sex_values:", "======= the \"data_source\" can be any one of: path --", "are both slower) # the saved pickles function isn't working", "else 0 else: sex_matches = np.nan output.loc[row.Index, 'actual_sex'] = actual_sex", "calls get_sex() with the minimum of data to be fast,", "on the plot. Note: you must supply a 'path' as", "True return_fig If True, returns a pyplot figure instead of", "#sns.set_theme(style=\"white\") show_mismatches = None if 'sex_matches' not in data.columns else", "matched samples are circles (if samplesheet contains actual sex data)", "may be unreliable:\") LOGGER.warning(f\"{failed_samples}\") if data_source_type in ('path'): output =", "index\") #sns.set_theme(style=\"white\") show_mismatches = None if 'sex_matches' not in data.columns", "= np.nan output.loc[row.Index, 'actual_sex'] = actual_sex output.loc[row.Index, 'sex_matches'] = sex_matches", "fast, because these are already loaded. 
Just passes in meth/unmeth", "with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes path -- to a", "X_col = [] Y_col = [] failed_samples = [] for", "as wrong sex when using -2, but work at -0.5.", "fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent' sample_failure_percent=sample_failure_percent,", "else \"sex_matches\" if show_mismatches: data[\"sex_matches\"] = data[\"sex_matches\"].map({0:\"Mismatch\", 1:\"Match\"}) show_failure =", "'m' in sex_values or 'f' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M',", "darker if low variance. Like a probability cloud. - sample", "'Gender' elif 'Sex' in renamed_columns.values(): renamed_column = 'Sex' if renamed_column", "data_containers containing raw meth/unmeth values, instead. This object is produced", "sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0 else:", "KeyError: if 'Sample_ID' in output.columns: LOGGER.warning(\"Sample_ID was another column in", "and these labels, to embed in a PDF report if", "for example) If a `poobah_values.pkl` file can be found in", "add more data about samples to the multi-dimensional QC plot", "pd.DataFrame(index=[s for s in meth.columns], columns=['x_median','y_median','predicted_sex']) # get median values", "another column in your output DataFrame; Set that to the", "array for you. median_cutoff the minimum difference in the medians", "change palette if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M': custom_palette", "N_total = len(data['sample_failure_percent'].index) ax.set_title(f\"{N_failed} of {N_total} samples failed poobah, with", "sample_ids and values as labels to apply to samples. e.g.", "include_probe_failure_percent=True, poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False): \"\"\"This will calculate and predict", "if available file_patterns = { 'sample_sheet_meta_data.pkl': 'meta', '*_meta_data.pkl': 'meta', '*samplesheet*.csv':", "loaded_files['meta'] = pd.read_pickle(filename) break if '.csv' in filename.suffixes: loaded_files['meta'] =", "won't work if a containers object or tuple of dataframes", "a folder with csv data that contains processed sample data", "each sex chromosome for each sample x_med = _get_copy_number(x_meth,x_unmeth).median() y_med", "with custom label and without, and check that controls_report still", "poobah[column][poobah[column] >= p_value_cutoff].index failed_x_probe_names = list(set(failed_probe_names) & set(x_probes)) failed_y_probe_names =", "medians of X and Y probe copy numbers to assign", "sample probes that can fail before the sample fails. Default", "you. median_cutoff the minimum difference in the medians of X", "on samples on plot to make it easier to read.", "else None if label: ax.text(row['x_median']+0.05, row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey')", "if save: filepath = 'predicted_sexes.png' if data_source_type != 'path' else", "bad samples. poobah_range = data[\"sample_failure_percent\"].max() - data[\"sample_failure_percent\"].min() if poobah_range <", "array_type = ArrayType(array_type) else: raise ValueError(f\"Your array_type must be one", "avoid sizing dots with narrow range; gives false impression of", "sex when using -2, but work at -0.5. 
# populate", "This adds actual_sex as a column and returns it.\"\"\" #", "= len(data[data['sample_failure_percent'] > poobah_cutoff].index) N_total = len(data['sample_failure_percent'].index) ax.set_title(f\"{N_failed} of {N_total}", "batches yet. meth, unmeth = methylcheck.qc_plot._get_data( data_containers=data_source, path=None, compare=False, noob=False,", "verbose: LOGGER.debug(array_type) LOGGER.setLevel(logging.WARNING) manifest = Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame # 'custom',", "in the index. This adds actual_sex as a column and", "because Sample_ID repeats in sample sheets. Only using first match,", "# these are a lookup dictionary of labels if return_fig:", "LOW failure rates, but shows IDs when failed - adds", "array here array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda)) elif isinstance(array_type,str): if array_type", "'Sex' else: renamed_columns = {col:(col.title() if col.lower() in ('sex','gender') else", "this label to the marker text labels. Dicts must match", "or Y if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame): p_value_cutoff", "value to be plotted is male, change palette if hasattr(data,", "matching X and Y probes.\") if save: filepath = 'predicted_sexes.png'", "'Female' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'}) elif 'male' in", "if renamed_column is not None: # next, ensure samplesheet Sex/Gender", "= None output['sex_matches'] = None for row in output.itertuples(): try:", "the array for you. median_cutoff the minimum difference in the", "OR corrected\"\"\" # minfi R version: # log2(getMeth(object) + getUnmeth(object))", "simply adds those this label to the marker text labels.", "instead of a path. Note: ~90% of Y probes should", "in poobah.columns: sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index) / len(poobah.index),1) failed_probe_names", "same thing, and only calls get_sex() with the minimum of", "in sex_values or 'FEMALE' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'})", "and warn the user if any did. This feature won't", "p_value_cutoff = 0.05 X_col = [] Y_col = [] failed_samples", "'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent' sample_failure_percent=sample_failure_percent, median_cutoff=median_cutoff, include_probe_failure_percent=include_probe_failure_percent, verbose=verbose, save=save,", "and gets replaced. if any(loaded_files['meta'].columns.duplicated()): loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()] LOGGER.info(\"Removed", "using methylcheck.load(filepath, format='meth') and lets you customize the import if", "to be at least 2 units. yscale = plt.gca().get_ylim() xscale", "else str(i-26) for i,index_val in enumerate(data.index)} for idx,row in data.iterrows():", "'female':'F'}) elif 'MALE' in sex_values or 'FEMALE' in sex_values: loaded_files['meta'][renamed_column]", "requires methylprep to be installed (pip3 install `methylprep`)\") (data_source_type, data_source)", "if the sample is female. That chromosome is missing.\"\"\" allowed_array_types", "- sample text is (ID, delta age) - sex mismatches", "that can fail before the sample fails. 
Default is 20", "not prepared using methylprep (non-standand CSV columns, for example) If", "if poobah_df exists, calculate percent X and Y probes that", "marker text labels. Dicts must match the data DF index.", "Sample_ID when sample_sheet uses alt column names and gets replaced.", "as image or `return_fig`. save True: saves the plot, if", "renamed_columns = {col:(col.title() if col.lower() in ('sex','gender') else col) for", "or parsing the containers (which are both slower) # the", "to the index when you pass it in.\") raise KeyError(\"Could", "pickles first, then csvs or parsing the containers (which are", "to, you can rerun the function with return_labels=True and it", "in your samplesheet are not understood (expecting M or F):", "for row in output.itertuples(): try: actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column)) except KeyError:", "understood (expecting M or F): (found {sex_values})\") output['actual_sex'] = None", "blue - marker circle size will be larger and more", "delta age) - sex mismatches are X, matched samples are", "matches may not be accurate.\") actual_sex = actual_sex[0] if hasattr(row,'predicted_sex'):", "methylcheck.qc_plot._get_data( data_containers=data_source, path=None, compare=False, noob=False, verbose=False) elif data_source_type == 'meth_unmeth_tuple':", "methylprep.run_pipeline() or methylcheck.load(path, 'meth') tuple of (meth, unmeth) dataframes array_type", "plt.show() def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output): \"\"\"output is a dataframe with Sample_ID", "also containing samplesheet pkl and poobah_values.pkl, if you want to", "supply a 'path' as data_source to include poobah in plots.", "Like a probability cloud. - sample text is (ID, delta", "== set('M') # if first value to be plotted is", "compare=False, noob=False, verbose=False) elif data_source_type == 'meth_unmeth_tuple': (meth, unmeth) =", "release memory return label_lookup if \"sample_failure_percent\" in data.columns: N_failed =", "values for each sex chromosome for each sample x_med =", "the import if your files were not prepared using methylprep", "(pip3 install `methylprep`)\") (data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source) # data_source_type is", "values output['x_median'] = output.index.map(x_med) output['y_median'] = output.index.map(y_med) # compute difference", "be found in path, the dataframe returned will also include", "pd.read_pickle(filename) break if '.csv' in filename.suffixes: loaded_files['meta'] = pd.read_csv(filename) break", "-2, but work at -0.5. # populate dataframe with predicted", "for filename in Path(filepath).expanduser().rglob(file_pattern): if '.pkl' in filename.suffixes: loaded_files['meta'] =", "~loaded_files['meta'].columns.duplicated()] LOGGER.info(\"Removed a duplicate Sample_ID column in samplesheet\") if 'Sample_ID'", "on plot to make it easier to read. 
So to", "= sex0 output = output.round(1) # if poobah_df exists, calculate", "Y probes.\") if save: filepath = 'predicted_sexes.png' if data_source_type !=", "dictionary with sample_ids and these labels, to embed in a", "filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser() plt.savefig(filepath,", "LOGGER.warning(f\"Multiple samples matched actual sex for {row.Index}, because Sample_ID repeats", "hasattr(row,'predicted_sex'): sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0", "and 'unmeth_values.pkl' dataframes path -- to a folder also containing", "work if a containers object or tuple of dataframes is", "for zoomed-in plots with few points close together, set the", "so matches may not be accurate.\") actual_sex = actual_sex[0] if", "--- used to predict sex sex0 = ['F' if x", "containers object or tuple of dataframes is passed in, instead", "dataframes is passed in, instead of a path. Note: ~90%", "array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda)) elif isinstance(array_type,str): if array_type in allowed_array_types:", "because samples might only contain one sex if 'Male' in", "# controls_report() does the same thing, and only calls get_sex()", "plot True: creates a plot, with option to `save` as", "median_cutoff=median_cutoff, include_probe_failure_percent=include_probe_failure_percent, verbose=verbose, save=save, poobah_cutoff=poobah_cutoff, custom_label=custom_label, data_source_type=data_source_type, data_source=data_source, return_fig=return_fig, return_labels=return_labels,", "loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns) if 'Gender' in renamed_columns.values(): renamed_column = 'Gender'", "and only calls get_sex() with the minimum of data to", "for {row.Index}, because Sample_ID repeats in sample sheets. Only using", "/ prep 1.6.0 meta_data lacking Sample_ID when sample_sheet uses alt", "_fetch_actual_sex_from_sample_sheet_meta_data(filepath, output): \"\"\"output is a dataframe with Sample_ID in the", "in ('container'): # this will look for saved pickles first,", "because these are already loaded. 
Just passes in meth/unmeth data", "in file_patterns: for filename in Path(filepath).expanduser().rglob(file_pattern): if '.pkl' in filename.suffixes:", "row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey') if return_labels: plt.close() # release", "if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser() plt.savefig(filepath, bbox_inches=\"tight\") if return_fig:", "= loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'}) elif 'MALE' in sex_values or 'FEMALE' in", "a folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes path --", "~90% of Y probes should fail if the sample is", "is one of {'path', 'container', 'control', 'meth_unmeth_tuple'} poobah=None if data_source_type", "function) include_probe_failure_percent: True: includes poobah percent per sample as column", "raw OR corrected\"\"\" # minfi R version: # log2(getMeth(object) +", "# Sample sheet should have 'M' or 'F' in column", "# subset, because samples might only contain one sex if", "poobah, with at least {poobah_cutoff}% of probes failing\") else: ax.set_title(f\"Predicted", "'_' + loaded_files['meta']['Sentrix_Position'].astype(str) loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID') else: raise ValueError(\"Your sample", "probes that failed sample_failure_percent = {} # % of ALL", "This object is produced by methylprep.run_pipeline, or by using methylcheck.load(filepath,", "output data index\") #sns.set_theme(style=\"white\") show_mismatches = None if 'sex_matches' not", "X_percent = round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(),1) Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1) X_col.append(X_percent) Y_col.append(Y_percent) if X_percent", "np.nan output.loc[row.Index, 'actual_sex'] = actual_sex output.loc[row.Index, 'sex_matches'] = sex_matches else:", "raise ValueError(f\"Cannot compare with predicted sex because actual sexes listed", "and unmeth values for the sex chromosomes x_meth = meth[meth.index.isin(x_probes)]", "more faded if poobah values are worse, smaller and darker", "was another column in your output DataFrame; Set that to", "y_unmeth = unmeth[unmeth.index.isin(y_probes)] # create empty dataframe for output output", "in filename.suffixes: loaded_files['meta'] = pd.read_pickle(filename) break if '.csv' in filename.suffixes:", "fig.fig.subplots_adjust(top=.95) # for zoomed-in plots with few points close together,", "data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'] - color is", "Default is 20 (percent) Has no effect if `include_probe_failure_percent` is", "data_source_type in ('path'): output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output) if plot ==", "this will look for saved pickles first, then csvs or", "-- to a folder also containing samplesheet pkl and poobah_values.pkl,", "palette=custom_palette, height=8, aspect=1.34) ax = fig.axes[0,0] fig.fig.subplots_adjust(top=.95) # for zoomed-in", "if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M') # if first", "with csv data that contains processed sample data path --", "difference median_difference = output['y_median'] - output['x_median'] # median cutoff -", "sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index): data['sample_failure_percent'] = pd.Series(sample_failure_percent)", "p-value probe detection. 
Predictions for these may be unreliable:\") LOGGER.warning(f\"{failed_samples}\")", "will load the data from data_source and determine the array", "legend of sketchy samples and labels - show delta age", "None: # next, ensure samplesheet Sex/Gender (Male/Female) are recoded as", "of probes failing\") else: ax.set_title(f\"Predicted sex based on matching X", "elif data_source_type in ('container'): # this will look for saved", "as sample_ids and values as labels to apply to samples.", "('container'): # this will look for saved pickles first, then", "should have 'M' or 'F' in column to match predicted", "minfi R version: # log2(getMeth(object) + getUnmeth(object)) return np.log2(meth+unmeth) def", "sex chromosome for each sample x_med = _get_copy_number(x_meth,x_unmeth).median() y_med =", "array_type in allowed_array_types: array_type = ArrayType(array_type) else: raise ValueError(f\"Your array_type", "color is sex, pink or blue - marker circle size", "if '.pkl' in filename.suffixes: loaded_files['meta'] = pd.read_pickle(filename) break if '.csv'", "with predicted sex output['predicted_sex'] = sex0 output = output.round(1) #", "output.index.map(y_med) # compute difference median_difference = output['y_median'] - output['x_median'] #", "data_source_type == 'meth_unmeth_tuple': (meth, unmeth) = data_source if len(meth) !=", "'.csv' in filename.suffixes: loaded_files['meta'] = pd.read_csv(filename) break if len(loaded_files) ==", "produced by methylprep.run_pipeline, or by using methylcheck.load(filepath, format='meth') and lets", "label and without, and check that controls_report still works with", "sex of each sample. inputs: ======= the \"data_source\" can be", "= meth[meth.index.isin(y_probes)] y_unmeth = unmeth[unmeth.index.isin(y_probes)] # create empty dataframe for", "data from data_source and determine the array for you. median_cutoff", "predicted as wrong sex when using -2, but work at", "meta_data lacking Sample_ID when sample_sheet uses alt column names and", "ImportError(\"This function requires methylprep to be installed (pip3 install `methylprep`)\")", "predict sex sex0 = ['F' if x < median_cutoff else", "path=data_source, compare=False, noob=True, verbose=False) if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists():", "None if 'sex_matches' not in data.columns else \"sex_matches\" if show_mismatches:", "['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'] - color is sex, pink", "csv data that contains processed sample data path -- to", "be fast, because these are already loaded. Just passes in", "table and on the plot. Note: you must supply a", "verbose=False) elif data_source_type == 'meth_unmeth_tuple': (meth, unmeth) = data_source if", "a column and returns it.\"\"\" # controls_report() does the same", "loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'}) elif 'male' in sex_values or 'female' in sex_values:", "delta age on labels (using custom column dict) - unit", "determine the array for you. median_cutoff the minimum difference in", "values are worse, smaller and darker if low variance. Like", "poobah_cutoff/2: show_failure = None sample_sizes = (40,40) custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7']))", "Y probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array", "Default is False. 
Note: return_fig will not show a plot", "{'path', 'container', 'control', 'meth_unmeth_tuple'} poobah=None if data_source_type in ('path'): #", "verbose=verbose, save=save, poobah_cutoff=poobah_cutoff, custom_label=custom_label, data_source_type=data_source_type, data_source=data_source, return_fig=return_fig, return_labels=return_labels, ) if", "label_lookup[idx] ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred') else: label =", "col.lower() in ('sex','gender') else col) for col in loaded_files['meta'].columns} loaded_files['meta']", "seaborn as sns from pathlib import Path #app import methylcheck", "get_sex() with the minimum of data to be fast, because", "= methylcheck.qc_plot._get_data( data_containers=data_source, path=None, compare=False, noob=False, verbose=False) elif data_source_type ==", "did. This feature won't work if a containers object or", "had >10% of X probes fail p-value probe detection. Predictions", "wrong sex when using -2, but work at -0.5. #", "is not None: # next, ensure samplesheet Sex/Gender (Male/Female) are", "try: actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column)) except KeyError: if 'Sample_ID' in output.columns:", "renamed_columns.values(): renamed_column = 'Gender' elif 'Sex' in renamed_columns.values(): renamed_column =", "pd.Series(sample_failure_percent) else: LOGGER.warning(\"sample_failure_percent index did not align with output data", "\"sample_failure_percent\" in data.columns: N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index) N_total =", "-2, include_probe_failure_percent=True, verbose=False, save=False, poobah_cutoff=20, #% custom_label=None, data_source_type=None, data_source=None, return_fig=False,", "samples had >10% of X probes fail p-value probe detection.", "values. can be raw OR corrected\"\"\" # minfi R version:", "male, change palette if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M':", "= output['y_median'] - output['x_median'] # median cutoff - can be", "ax.text(row['x_median']+0.05, row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey') if return_labels: plt.close() #", "but shows IDs when failed - adds legend of sketchy", "# release memory return label_lookup if \"sample_failure_percent\" in data.columns: N_failed", "(ID, delta age) - sex mismatches are X, matched samples", "pandas as pd import numpy as np import matplotlib.pyplot as", "= data_source if len(meth) != len(unmeth): raise ValueError(f\"WARNING: probe count", "or (Sentrix_ID and Sentrix_Position) columns.\") # fixing case of the", "data about samples to the multi-dimensional QC plot while providing", "Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1) X_col.append(X_percent) Y_col.append(Y_percent) if X_percent > 10: failed_samples.append(column)", "for idx,row in data.iterrows(): if \"sample_failure_percent\" in row and row['sample_failure_percent']", "files with two Sample_ID columns. Check and fix here: #", "plots. poobah_cutoff The maximum percent of sample probes that can", "is blue; female is pink # if hasattr(output, 'actual_sex') and", "sample. 
inputs: ======= the \"data_source\" can be any one of:", "in filename.suffixes: loaded_files['meta'] = pd.read_csv(filename) break if len(loaded_files) == 1:", "array_type (string) enum: {'27k','450k','epic','epic+','mouse'} if not specified, it will load", "and Y chromosomes that failed quality control, and warn the", "only calls get_sex() with the minimum of data to be", "sex_values or 'f' in sex_values: loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'}) else:", "pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser()) elif data_source_type in ('container'): # this will look for", "ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred') else: label = f\"{custom_label.get(idx)}\"", "of data_containers containing raw meth/unmeth values, instead. This object is", "loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns: loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str) + '_'", "loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()] LOGGER.info(\"Removed a duplicate Sample_ID column in samplesheet\") if", "ArrayType except ImportError: raise ImportError(\"This function requires methylprep to be", "\"\"\"This will calculate and predict the sex of each sample.", "columns.\") # fixing case of the relevant column renamed_column =", "= loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'}) else: raise ValueError(f\"Cannot compare with predicted sex", "sample text is (ID, delta age) - sex mismatches are", "'M' or 'F' in column to match predicted sex. #", "--- default = -2 --- used to predict sex sex0", "= output.index.map(x_med) output['y_median'] = output.index.map(y_med) # compute difference median_difference =", "if len(loaded_files) == 1: # methylprep v1.5.4-6 was creating meta_data", "each sample. inputs: ======= the \"data_source\" can be any one", "# median cutoff - can be manipulated by user ---", "verbose=False) except Exception as e: meth, unmeth = methylcheck.qc_plot._get_data( data_containers=None,", "any did. This feature won't work if a containers object", "hue=\"predicted_sex\", size=show_failure, style=show_mismatches, sizes=sample_sizes, alpha=.5, palette=custom_palette, height=8, aspect=1.34) ax =", "installed (pip3 install `methylprep`)\") (data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source) # data_source_type", "failed quality control, and warn the user if any did.", "# methylprep v1.5.4-6 was creating meta_data files with two Sample_ID", "controls_report() does NOT do this step, but should. sex_values =", "_get_copy_number(y_meth,y_unmeth).median() # populate output dataframe with values output['x_median'] = output.index.map(x_med)", "of a path. Note: ~90% of Y probes should fail", "for batches yet. 
meth, unmeth = methylcheck.qc_plot._get_data( data_containers=data_source, path=None, compare=False,", "specified, it will load the data from data_source and determine", "= str(loaded_files['meta'].loc[row.Index].get(renamed_column)) except KeyError: if 'Sample_ID' in output.columns: LOGGER.warning(\"Sample_ID was", "include_probe_failure_percent=include_probe_failure_percent, verbose=verbose, save=save, poobah_cutoff=poobah_cutoff, custom_label=custom_label, data_source_type=data_source_type, data_source=data_source, return_fig=return_fig, return_labels=return_labels, )", "of sketchy samples and labels - show delta age on", "break if '.csv' in filename.suffixes: loaded_files['meta'] = pd.read_csv(filename) break if", "output table and on the plot. Note: you must supply", "True: saves the plot, if plot is True return_fig If", "a containers object or tuple of dataframes is passed in,", "as plt import seaborn as sns from pathlib import Path", "data.iterrows(): if \"sample_failure_percent\" in row and row['sample_failure_percent'] > poobah_cutoff: label", "loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'}) elif 'MALE' in sex_values or 'FEMALE'", "a duplicate Sample_ID column in samplesheet\") if 'Sample_ID' in loaded_files['meta'].columns:", "the sample is female. That chromosome is missing.\"\"\" allowed_array_types =", "= actual_sex[0] if hasattr(row,'predicted_sex'): sex_matches = 1 if actual_sex.upper() ==", "in the medians of X and Y probe copy numbers", "- can be manipulated by user --- default = -2", "return fig return output def _plot_predicted_sex(data=pd.DataFrame(), sample_failure_percent={}, median_cutoff= -2, include_probe_failure_percent=True,", "ax.set_title(f\"{N_failed} of {N_total} samples failed poobah, with at least {poobah_cutoff}%", "output DataFrame; Set that to the index when you pass", "a `poobah_values.pkl` file can be found in path, the dataframe", "= f\"{custom_label.get(idx)}\" if isinstance(custom_label, dict) else None if label: ax.text(row['x_median']+0.05,", "`save` as image or `return_fig`. save True: saves the plot,", "male is blue; female is pink # if hasattr(output, 'actual_sex')", "a 'path' as data_source to include poobah in plots. poobah_cutoff", "= sns.set_palette(sns.color_palette(['#FE6E89','#0671B7'])) # if only one sex, make sure male", "data_containers=None, path=data_source, compare=False, noob=False, verbose=False) except Exception as e: meth,", "be installed (pip3 install `methylprep`)\") (data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source) #", "== 1: # methylprep v1.5.4-6 was creating meta_data files with", "compare.\") if isinstance(actual_sex, pd.Series): LOGGER.warning(f\"Multiple samples matched actual sex for", "per sample as column in the output table and on", "\"sample_failure_percent\" sample_sizes = (20, 600) if show_failure: # avoid sizing", "the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes path -- to a folder", "data_source if len(meth) != len(unmeth): raise ValueError(f\"WARNING: probe count mismatch:", "-- to a folder with csv data that contains processed", "pyplot figure instead of a dataframe. Default is False. 
Note:", "a \"custom_label\" dict passed in, such as (actual_age - predicted_age),", "meth[meth.index.isin(x_probes)] x_unmeth = unmeth[unmeth.index.isin(x_probes)] y_meth = meth[meth.index.isin(y_probes)] y_unmeth = unmeth[unmeth.index.isin(y_probes)]", "verbose=False, save=False, poobah_cutoff=20, #% custom_label=None, data_source_type=None, data_source=None, return_fig=False, return_labels=False): \"\"\"", "actual sex data) - omits labels for samples that have", "median_cutoff= -2, include_probe_failure_percent=True, poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False): \"\"\"This will calculate", "containing samplesheet pkl and poobah_values.pkl, if you want to compare", "\"custom_label\" dict passed in, such as (actual_age - predicted_age), it", "poobah values are worse, smaller and darker if low variance.", "= pd.Series(sample_failure_percent) else: LOGGER.warning(\"sample_failure_percent index did not align with output", "compare with predicted sex because actual sexes listed in your", "probes that can fail before the sample fails. Default is", "array_type=None, verbose=False, plot=False, save=False, on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True, poobah_cutoff=20, custom_label=None,", "LOGGER.setLevel(logging.INFO) x_probes = manifest.index[manifest['CHR']=='X'] y_probes = manifest.index[manifest['CHR']=='Y'] if verbose: LOGGER.info(f\"Found", "'*samplesheet*.csv': 'meta', '*sample_sheet*.csv': 'meta', } loaded_files = {} for file_pattern", "with few points close together, set the min scale to", "Has no effect if `include_probe_failure_percent` is False. plot True: creates", "output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output) if plot == True: fig =", "compute difference median_difference = output['y_median'] - output['x_median'] # median cutoff", "poobah_cutoff: label = f\"{label_lookup[idx]}, {custom_label.get(idx)}\" if isinstance(custom_label, dict) and custom_label.get(idx)", "in meth.columns], columns=['x_median','y_median','predicted_sex']) # get median values for each sex", "data # Sample sheet should have 'M' or 'F' in", "(Male/Female) are recoded as M/F; controls_report() does NOT do this", "the dataframe returned will also include percent of probes for", "'M' for x in median_difference] # NOTE for testing: GSE85566/GPL13534", "instead of a dataframe. Default is False. Note: return_fig will", "make it easier to read. 
So to get what sample_ids", "loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'}) else: raise ValueError(f\"Cannot compare with predicted sex because", "raise ValueError(\"Your sample sheet must have a Sample_ID column, or", "pass # no Sex/Gender column found in samplesheet return output", "to a folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes path", "first match, so matches may not be accurate.\") actual_sex =", "list(set(failed_probe_names) & set(y_probes)) X_percent = round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(),1) Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1) X_col.append(X_percent)", "LOGGER.warning(\"sample_failure_percent index did not align with output data index\") #sns.set_theme(style=\"white\")", "samplesheet are not understood (expecting M or F): (found {sex_values})\")", "hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M') # if first value", "data_source_type=None, data_source=None, return_fig=False, return_labels=False): \"\"\" data columns: ['x_median', 'y_median', 'predicted_sex',", "probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here", "if actual_sex.upper() == str(row.predicted_sex).upper() else 0 else: sex_matches = np.nan", "be larger and more faded if poobah values are worse,", "plot if there is a \"custom_label\" dict passed in, such", "in plots. poobah_cutoff The maximum percent of sample probes that", "of {'path', 'container', 'control', 'meth_unmeth_tuple'} poobah=None if data_source_type in ('path'):", "import ArrayType except ImportError: raise ImportError(\"This function requires methylprep to", "and just return a dictionary with sample_ids and these labels,", "y='y_median', hue=\"predicted_sex\", size=show_failure, style=show_mismatches, sizes=sample_sizes, alpha=.5, palette=custom_palette, height=8, aspect=1.34) ax", "< poobah_cutoff/2: show_failure = None sample_sizes = (40,40) custom_palette =", "poobah_cutoff The maximum percent of sample probes that can fail", "look for saved pickles first, then csvs or parsing the", "one of these: {allowed_array_types} or None.\") if verbose: LOGGER.debug(array_type) LOGGER.setLevel(logging.WARNING)", "p_value_cutoff].index) / len(poobah.index),1) failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index failed_x_probe_names =", "only contain one sex if 'Male' in sex_values or 'Female'", "text labels. Dicts must match the data DF index. \"\"\"", "probes failing\") else: ax.set_title(f\"Predicted sex based on matching X and", "return_fig: return fig plt.show() def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output): \"\"\"output is a", "show a plot on screen. return_labels: (requires plot == True)", "this function - save_fig - return_labels, returns a lookup dict", "is passed in, instead of a path. Note: ~90% of", "difference in the medians of X and Y probe copy", "abs(yscale[1]-yscale[0]) < 2.0: ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1) ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1) label_lookup = {index_val:", "omits labels for samples that have LOW failure rates, but", "female. 
That chromosome is missing.\"\"\" allowed_array_types = {'27k','450k','epic','epic+','mouse'} try: from", "rates, but shows IDs when failed - adds legend of", "age on labels (using custom column dict) - unit tests", "elif data_source_type == 'meth_unmeth_tuple': (meth, unmeth) = data_source if len(meth)", "samplesheet\") if 'Sample_ID' in loaded_files['meta'].columns: loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID') elif 'Sentrix_ID'", "methylcheck.load(path, 'meth') tuple of (meth, unmeth) dataframes array_type (string) enum:", "loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID') elif 'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in", "elif 'male' in sex_values or 'female' in sex_values: loaded_files['meta'][renamed_column] =", "a lookup dict instead of plot if there is a", "what sample_ids these labels correspond to, you can rerun the", "and isinstance(poobah, pd.DataFrame): p_value_cutoff = 0.05 X_col = [] Y_col", "if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M': custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89']))", "instead. This object is produced by methylprep.run_pipeline, or by using", "fig # these are a lookup dictionary of labels if", "'container', 'control', 'meth_unmeth_tuple'} poobah=None if data_source_type in ('path'): # this", "# populate output dataframe with values output['x_median'] = output.index.map(x_med) output['y_median']", "output.columns: LOGGER.warning(\"Sample_ID was another column in your output DataFrame; Set", "= (40,40) custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7'])) # if only one sex,", "ImportError: raise ImportError(\"This function requires methylprep to be installed (pip3", "in samplesheet\") if 'Sample_ID' in loaded_files['meta'].columns: loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID') elif", "'Sample_ID' in output.columns: LOGGER.warning(\"Sample_ID was another column in your output", "sex0 = ['F' if x < median_cutoff else 'M' for", "a dataframe. Default is False. Note: return_fig will not show", "should fail if the sample is female. That chromosome is", "if any(loaded_files['meta'].columns.duplicated()): loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()] LOGGER.info(\"Removed a duplicate Sample_ID", "methylprep.models import ArrayType except ImportError: raise ImportError(\"This function requires methylprep", "sex for {row.Index}, because Sample_ID repeats in sample sheets. Only", "matched actual sex for {row.Index}, because Sample_ID repeats in sample", "names and gets replaced. if any(loaded_files['meta'].columns.duplicated()): loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()]", "palette if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M': custom_palette =", "(found {sex_values})\") output['actual_sex'] = None output['sex_matches'] = None for row", "of {N_total} samples failed poobah, with at least {poobah_cutoff}% of", "== None: # get list of X any Y probes", "Note: ~90% of Y probes should fail if the sample", "option to `save` as image or `return_fig`. 
save True: saves", "return np.log2(meth+unmeth) def get_sex(data_source, array_type=None, verbose=False, plot=False, save=False, on_lambda=False, median_cutoff=", "if col.lower() in ('sex','gender') else col) for col in loaded_files['meta'].columns}", "size=show_failure, style=show_mismatches, sizes=sample_sizes, alpha=.5, palette=custom_palette, height=8, aspect=1.34) ax = fig.axes[0,0]", "{len(y_probes)} Y probes\") # dataframes of meth and unmeth values", "import matplotlib.pyplot as plt import seaborn as sns from pathlib", "except ImportError: raise ImportError(\"This function requires methylprep to be installed", "set(loaded_files['meta'][renamed_column].unique()) #print('sex_values', sex_values) if not sex_values.issubset(set(['M','F'])): # subset, because samples", "does the same thing, and only calls get_sex() with the", "sample_ids these labels correspond to, you can rerun the function", "the data from data_source and determine the array for you.", "isinstance(array_type,str): if array_type in allowed_array_types: array_type = ArrayType(array_type) else: raise", "save: filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser()", "sample data path -- to a folder with the 'meth_values.pkl'", "creates a plot, with option to `save` as image or", "thing, and only calls get_sex() with the minimum of data", "you pass it in.\") raise KeyError(\"Could not read actual sex", "'sample_failure_percent' not in data.columns else \"sample_failure_percent\" sample_sizes = (20, 600)", "custom_label=custom_label, data_source_type=data_source_type, data_source=data_source, return_fig=return_fig, return_labels=return_labels, ) if return_labels: return fig", "# dataframes of meth and unmeth values for the sex", "- marker circle size will be larger and more faded", "that failed sample_failure_percent = {} # % of ALL probes", "if 'Gender' in renamed_columns.values(): renamed_column = 'Gender' elif 'Sex' in", "(Sentrix_ID and Sentrix_Position) columns.\") # fixing case of the relevant", "== set(data.index): data['sample_failure_percent'] = pd.Series(sample_failure_percent) else: LOGGER.warning(\"sample_failure_percent index did not", "\"\"\" if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index): data['sample_failure_percent']", "\"\"\"function to return copy number. requires dataframes of methylated and", "the same thing, and only calls get_sex() with the minimum", "'path' as data_source to include poobah in plots. poobah_cutoff The", "can be any one of: path -- to a folder", "when sample_sheet uses alt column names and gets replaced. if", "save=False, on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True, poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False): \"\"\"This", "custom label and without, and check that controls_report still works", "return copy number. requires dataframes of methylated and unmethylated values.", "may not be accurate.\") actual_sex = actual_sex[0] if hasattr(row,'predicted_sex'): sex_matches", "samples failed poobah, with at least {poobah_cutoff}% of probes failing\")" ]
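# ---------------------------------------------------------------------------
# Usage sketch (not part of the module above): how get_sex() is typically
# called on a methylprep-processed folder. The folder name below and the
# assumption that get_sex is re-exported at the methylcheck package level are
# illustrative; only the parameters and output columns come from the function
# itself.
import methylcheck

results = methylcheck.get_sex(
    "./processed_idats",               # hypothetical path holding meth_values.pkl / unmeth_values.pkl
    median_cutoff=-2,                  # predict 'F' when (y_median - x_median) < -2, otherwise 'M'
    include_probe_failure_percent=True,
    poobah_cutoff=20,                  # flag samples with more than 20% failed probes on the plot
    plot=False,
)

# Expected columns: x_median, y_median, predicted_sex, plus X_fail_percent /
# Y_fail_percent when poobah_values.pkl is present, and actual_sex / sex_matches
# when a sample sheet with a Sex or Gender column is found in the folder.
print(results[['x_median', 'y_median', 'predicted_sex']].head())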
# Generated by Django 3.1.2 on 2020-11-15 15:37

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0002_auto_20201115_1531'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='user_id',
            field=models.IntegerField(blank=True, null=True, unique=True, validators=[
                django.core.validators.MaxValueValidator(99999),
                django.core.validators.MinValueValidator(10000),
                django.core.validators.MinLengthValidator(5),
                django.core.validators.MaxLengthValidator(5)]),
        ),
    ]
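# Added note (a sketch, not generated by Django): MinLengthValidator and MaxLengthValidator call
# len() on the value, so attaching them to an IntegerField raises TypeError once the field is
# validated (e.g. via full_clean() or a ModelForm). The 5-digit constraint is already enforced by the
# value validators, so an equivalent field without the length validators would look like:
#
#   field=models.IntegerField(
#       blank=True, null=True, unique=True,
#       validators=[django.core.validators.MinValueValidator(10000),
#                   django.core.validators.MaxValueValidator(99999)]),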
<reponame>sirosen/git-fortune<filename>tests/test_basics.py
import subprocess

from git_fortune._compat import fix_line_endings
from git_fortune.version import __version__


def test_help(capfd):
    subprocess.check_call(["git-fortune", "-h"])
    captured = capfd.readouterr()
    assert (
        fix_line_endings(
            """
A fortune-like command for showing git tips

Invoke it as 'git-fortune' or 'git fortune'
"""
        )
        in captured.out
    )


def test_version(capfd):
    subprocess.check_call(["git-fortune", "--version"])
    captured = capfd.readouterr()
    assert "git-fortune {}".format(__version__) in captured.out


def test_tip_boxformat(capfd):
    subprocess.check_call(["git-fortune", "--id", "3"])
    tip3boxbody = fix_line_endings(
        """\
+-------------------------------------------------------------------------------+
| GIT TIP #3                                                                     |
|                                                                                |
| `git log --graph` can show you a tree-like representation of the git history. |
|                                                                                |
| Try adding in `--oneline --decorate --all`.                                    |
|                                                                                |
+-------------------------------------------------------------------------------+
"""
    )
    captured = capfd.readouterr()
    assert captured.out == tip3boxbody


def test_tip_plainformat(capfd):
    subprocess.check_call(["git-fortune", "--format", "plain", "--id", "1"])
    tip1plainbody = fix_line_endings(
        "Modify your last commit before pushing with `git commit --amend`.\n"
    )
    captured = capfd.readouterr()
    assert captured.out == tip1plainbody


def test_noargs(capfd):
    """just make sure it doesn't crashfail"""
    subprocess.check_call(["git-fortune"])
    captured = capfd.readouterr()
    assert "GIT TIP #" in captured.out  # from the box format


def test_category(capfd):
    """just make sure it doesn't crashfail"""
    subprocess.check_call(["git-fortune", "--category", "diff"])
    captured = capfd.readouterr()
    assert "GIT TIP #" in captured.out  # from the box format


def test_category_and_id_mutex(capfd):
    ret = subprocess.call(["git-fortune", "--category", "diff", "--id", "3"])
    assert ret == 2
    captured = capfd.readouterr()
    assert "" == captured.out
    assert "argument --id: not allowed with argument --category" in captured.err
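# A minimal added sketch (not part of the test module above) of the CLI behaviour the last test
# encodes: --category and --id are mutually exclusive, so the program exits with status 2 and argparse
# reports the conflict on stderr. Assumes the git-fortune console script is installed and on PATH.
import subprocess

proc = subprocess.run(
    ["git-fortune", "--category", "diff", "--id", "3"],
    capture_output=True,
    text=True,
)
assert proc.returncode == 2
assert "argument --id: not allowed with argument --category" in proc.stderr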
<reponame>VGrondin/CBNetV2_mask_remote<gh_stars>0
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py' ]
model = dict(
    type='FasterRCNN',
    # pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        # type='StandardRoIHead',
        _delete_=True,
        type='KeypointRoIHead',
        output_heatmaps=False,
        # keypoint_head=dict(
        #     type='HRNetKeypointHead',
        #     num_convs=8,
        #     in_channels=256,
        #     features_size=[256, 256, 256, 256],
        #     conv_out_channels=512,
        #     num_keypoints=5,
        #     loss_keypoint=dict(type='MSELoss', loss_weight=50.0)),
        keypoint_decoder=dict(type='HeatmapDecodeOneKeypoint', upscale=4),
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
)
#optimizer = dict(lr=0.002)
#lr_config = dict(step=[40, 55])
#total_epochs = 60
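# Added sketch (not part of the config above): how a config like this is typically loaded in an
# mmdetection-style setup. Assumes an mmcv release that still exposes mmcv.Config (newer stacks use
# mmengine.Config) and that the _base_ path resolves; the config file name here is hypothetical.
from mmcv import Config

cfg = Config.fromfile('configs/keypoint/faster_rcnn_r50_fpn_keypoint.py')
print(cfg.model.roi_head.type)                   # 'KeypointRoIHead'
print(cfg.model.roi_head.bbox_head.num_classes)  # 80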
[ "input_s3d, rois_3d): ''' input0: sparse 3d tensor rois_3d: 3d box,", "sampling_ratio ) # [171, 256, 7, 7] return output @staticmethod", "Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.", "is positive Note: the order of w and h inside", "rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio ) return output def __repr__(self): tmpstr", "= roi_align_rotated_3d( input_d3d, rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio ) return output", "spatial_scale, sampling_ratio): ''' output_size:[pooled_height, pooled_width] spatial_scale: size_of_map/size_of_original_image sampling_ratio: how many", "ctx.save_for_backward(roi) ctx.output_size = _pair(output_size) ctx.spatial_scale = spatial_scale ctx.sampling_ratio = sampling_ratio", "sampling_ratio: 2 output = _C.roi_align_rotated_3d_forward( input, roi, spatial_scale, output_size[0], output_size[1],", "2 output = _C.roi_align_rotated_3d_forward( input, roi, spatial_scale, output_size[0], output_size[1], output_size[2],", "= _C.roi_align_rotated_3d_forward( input, roi, spatial_scale, output_size[0], output_size[1], output_size[2], sampling_ratio )", "(7,7,7) self.spatial_scale = spatial_scale # 0.25 self.sampling_ratio = sampling_ratio #", "the order of w and h inside of input and", "def __repr__(self): tmpstr = self.__class__.__name__ + \"(\" tmpstr += \"output_size=\"", "from torch.autograd.function import once_differentiable from torch.nn.modules.utils import _pair from SparseConvNet.sparseconvnet.tools_3d_2d", "spatial_scale = ctx.spatial_scale sampling_ratio = ctx.sampling_ratio bs, ch, h, w,", "= ctx.input_shape grad_input = _C.roi_align_rotated_3d_backward( grad_output, rois, spatial_scale, output_size[0], output_size[1],", "ctx.output_size spatial_scale = ctx.spatial_scale sampling_ratio = ctx.sampling_ratio bs, ch, h,", "roi, output_size, spatial_scale, sampling_ratio): ctx.save_for_backward(roi) ctx.output_size = _pair(output_size) ctx.spatial_scale =", "\"(\" tmpstr += \"output_size=\" + str(self.output_size) tmpstr += \", spatial_scale=\"", "from torch.autograd import Function from torch.autograd.function import once_differentiable from torch.nn.modules.utils", "degree, anti-clock wise is positive Note: the order of w", "spatial_scale=\" + str(self.spatial_scale) tmpstr += \", sampling_ratio=\" + str(self.sampling_ratio) tmpstr", "self.output_size = output_size # (7,7,7) self.spatial_scale = spatial_scale # 0.25", "how many points to use for bilinear_interpolate ''' super(ROIAlignRotated3D, self).__init__()", "+ str(self.output_size) tmpstr += \", spatial_scale=\" + str(self.spatial_scale) tmpstr +=", "output_size: [7,7,7] # sampling_ratio: 2 output = _C.roi_align_rotated_3d_forward( input, roi,", "_C.roi_align_rotated_3d_forward( input, roi, spatial_scale, output_size[0], output_size[1], output_size[2], sampling_ratio ) #", "of w and h inside of input and rois is", "super(ROIAlignRotated3D, self).__init__() self.output_size = output_size # (7,7,7) self.spatial_scale = spatial_scale", "math from torch import nn from torch.autograd import Function from", "[7,7,7] # sampling_ratio: 2 output = _C.roi_align_rotated_3d_forward( input, roi, spatial_scale,", "\", spatial_scale=\" + str(self.spatial_scale) tmpstr += \", sampling_ratio=\" + str(self.sampling_ratio)", "sampling_ratio, ) return grad_input, None, None, None, None roi_align_rotated_3d =", "def __init__(self, output_size, spatial_scale, sampling_ratio): ''' output_size:[pooled_height, pooled_width] spatial_scale: 
size_of_map/size_of_original_image", "_pair from SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d import _C class _ROIAlignRotated3D(Function): @staticmethod", "unit: degree, anti-clock wise is positive Note: the order of", "[4, 256, 304, 200, 7] # roi: [171, 8] #", "All Rights Reserved. import torch, math from torch import nn", "sampling_ratio ctx.input_shape = input.size() # input: [4, 256, 304, 200,", "from SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d import _C class _ROIAlignRotated3D(Function): @staticmethod def", "ctx.spatial_scale = spatial_scale ctx.sampling_ratio = sampling_ratio ctx.input_shape = input.size() #", "tmpstr += \", spatial_scale=\" + str(self.spatial_scale) tmpstr += \", sampling_ratio=\"", "output @staticmethod @once_differentiable def backward(ctx, grad_output): rois, = ctx.saved_tensors output_size", "_ROIAlignRotated3D.apply class ROIAlignRotated3D(nn.Module): def __init__(self, output_size, spatial_scale, sampling_ratio): ''' output_size:[pooled_height,", "sampling_ratio): ''' output_size:[pooled_height, pooled_width] spatial_scale: size_of_map/size_of_original_image sampling_ratio: how many points", "same as input0, yaw unit is rad, anti-clock wise is", "output_size[2], bs, ch, h, w, zsize, sampling_ratio, ) return grad_input,", "w] rois: [n,5] [batch_ind, center_w, center_h, roi_width, roi_height, theta] theta", "tmpstr = self.__class__.__name__ + \"(\" tmpstr += \"output_size=\" + str(self.output_size)", "None roi_align_rotated_3d = _ROIAlignRotated3D.apply class ROIAlignRotated3D(nn.Module): def __init__(self, output_size, spatial_scale,", "3d tensor rois_3d: 3d box, xyz order is same as", "is positive input: [batch_size, feature, h, w] rois: [n,5] [batch_ind,", "positive input: [batch_size, feature, h, w] rois: [n,5] [batch_ind, center_w,", "input: [batch_size, feature, h, w] rois: [n,5] [batch_ind, center_w, center_h,", "# input: [4, 256, 304, 200, 7] # roi: [171,", "self.spatial_scale = spatial_scale # 0.25 self.sampling_ratio = sampling_ratio # 2", ") return output def __repr__(self): tmpstr = self.__class__.__name__ + \"(\"", "self.__class__.__name__ + \"(\" tmpstr += \"output_size=\" + str(self.output_size) tmpstr +=", "@staticmethod def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio): ctx.save_for_backward(roi) ctx.output_size", "sampling_ratio = ctx.sampling_ratio bs, ch, h, w, zsize = ctx.input_shape", "class ROIAlignRotated3D(nn.Module): def __init__(self, output_size, spatial_scale, sampling_ratio): ''' output_size:[pooled_height, pooled_width]", "__init__(self, output_size, spatial_scale, sampling_ratio): ''' output_size:[pooled_height, pooled_width] spatial_scale: size_of_map/size_of_original_image sampling_ratio:", "grad_input, None, None, None, None roi_align_rotated_3d = _ROIAlignRotated3D.apply class ROIAlignRotated3D(nn.Module):", "as input0, yaw unit is rad, anti-clock wise is positive", "@once_differentiable def backward(ctx, grad_output): rois, = ctx.saved_tensors output_size = ctx.output_size", "self.sampling_ratio ) return output def __repr__(self): tmpstr = self.__class__.__name__ +", "tmpstr += \", sampling_ratio=\" + str(self.sampling_ratio) tmpstr += \")\" return", "256, 7, 7] return output @staticmethod @once_differentiable def backward(ctx, grad_output):", "pooled_width] spatial_scale: size_of_map/size_of_original_image sampling_ratio: how many points to use for", "yaw unit is rad, anti-clock wise is positive input: [batch_size,", "roi_height, theta] theta unit: 
degree, anti-clock wise is positive Note:", "torch.nn.modules.utils import _pair from SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d import _C class", "output_size[2], sampling_ratio ) # [171, 256, 7, 7] return output", "ctx.sampling_ratio bs, ch, h, w, zsize = ctx.input_shape grad_input =", "rois_3d: 3d box, xyz order is same as input0, yaw", "= sampling_ratio ctx.input_shape = input.size() # input: [4, 256, 304,", "unit is rad, anti-clock wise is positive input: [batch_size, feature,", "str(self.spatial_scale) tmpstr += \", sampling_ratio=\" + str(self.sampling_ratio) tmpstr += \")\"", "+ str(self.spatial_scale) tmpstr += \", sampling_ratio=\" + str(self.sampling_ratio) tmpstr +=", "return output @staticmethod @once_differentiable def backward(ctx, grad_output): rois, = ctx.saved_tensors", "# (7,7,7) self.spatial_scale = spatial_scale # 0.25 self.sampling_ratio = sampling_ratio", "sparse_3d_to_dense_2d(input_s3d) output = roi_align_rotated_3d( input_d3d, rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio )", "self.spatial_scale, self.sampling_ratio ) return output def __repr__(self): tmpstr = self.__class__.__name__", "box, xyz order is same as input0, yaw unit is", "= ctx.saved_tensors output_size = ctx.output_size spatial_scale = ctx.spatial_scale sampling_ratio =", "rad, anti-clock wise is positive input: [batch_size, feature, h, w]", "rois is different. ''' input_d3d = sparse_3d_to_dense_2d(input_s3d) output = roi_align_rotated_3d(", "rois, spatial_scale, output_size[0], output_size[1], output_size[2], bs, ch, h, w, zsize,", "forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio): ctx.save_for_backward(roi) ctx.output_size = _pair(output_size)", "w, zsize, sampling_ratio, ) return grad_input, None, None, None, None", "rois_3d): ''' input0: sparse 3d tensor rois_3d: 3d box, xyz", "tmpstr += \"output_size=\" + str(self.output_size) tmpstr += \", spatial_scale=\" +", "# 2 def forward(self, input_s3d, rois_3d): ''' input0: sparse 3d", "spatial_scale, output_size[0], output_size[1], output_size[2], sampling_ratio ) # [171, 256, 7,", "output = roi_align_rotated_3d( input_d3d, rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio ) return", "spatial_scale: 0.25 # output_size: [7,7,7] # sampling_ratio: 2 output =", "output_size, spatial_scale, sampling_ratio): ''' output_size:[pooled_height, pooled_width] spatial_scale: size_of_map/size_of_original_image sampling_ratio: how", "''' output_size:[pooled_height, pooled_width] spatial_scale: size_of_map/size_of_original_image sampling_ratio: how many points to", "order is same as input0, yaw unit is rad, anti-clock", "sparse 3d tensor rois_3d: 3d box, xyz order is same", "import sparse_3d_to_dense_2d import _C class _ROIAlignRotated3D(Function): @staticmethod def forward(ctx, input,", "\"output_size=\" + str(self.output_size) tmpstr += \", spatial_scale=\" + str(self.spatial_scale) tmpstr", "+= \", sampling_ratio=\" + str(self.sampling_ratio) tmpstr += \")\" return tmpstr", "spatial_scale # 0.25 self.sampling_ratio = sampling_ratio # 2 def forward(self,", "torch import nn from torch.autograd import Function from torch.autograd.function import", "nn from torch.autograd import Function from torch.autograd.function import once_differentiable from", "anti-clock wise is positive input: [batch_size, feature, h, w] rois:", "ctx.output_size = _pair(output_size) ctx.spatial_scale = spatial_scale ctx.sampling_ratio = sampling_ratio ctx.input_shape", "to use for bilinear_interpolate ''' 
super(ROIAlignRotated3D, self).__init__() self.output_size = output_size", "SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d import _C class _ROIAlignRotated3D(Function): @staticmethod def forward(ctx,", "wise is positive Note: the order of w and h", "[n,5] [batch_ind, center_w, center_h, roi_width, roi_height, theta] theta unit: degree,", "input0, yaw unit is rad, anti-clock wise is positive input:", "torch.autograd import Function from torch.autograd.function import once_differentiable from torch.nn.modules.utils import", "= ctx.output_size spatial_scale = ctx.spatial_scale sampling_ratio = ctx.sampling_ratio bs, ch,", "None, None, None roi_align_rotated_3d = _ROIAlignRotated3D.apply class ROIAlignRotated3D(nn.Module): def __init__(self,", "= self.__class__.__name__ + \"(\" tmpstr += \"output_size=\" + str(self.output_size) tmpstr", "+ \"(\" tmpstr += \"output_size=\" + str(self.output_size) tmpstr += \",", "output_size = ctx.output_size spatial_scale = ctx.spatial_scale sampling_ratio = ctx.sampling_ratio bs,", "# sampling_ratio: 2 output = _C.roi_align_rotated_3d_forward( input, roi, spatial_scale, output_size[0],", "h, w, zsize = ctx.input_shape grad_input = _C.roi_align_rotated_3d_backward( grad_output, rois,", "roi_align_rotated_3d = _ROIAlignRotated3D.apply class ROIAlignRotated3D(nn.Module): def __init__(self, output_size, spatial_scale, sampling_ratio):", "different. ''' input_d3d = sparse_3d_to_dense_2d(input_s3d) output = roi_align_rotated_3d( input_d3d, rois_3d,", "8] # spatial_scale: 0.25 # output_size: [7,7,7] # sampling_ratio: 2", "sparse_3d_to_dense_2d import _C class _ROIAlignRotated3D(Function): @staticmethod def forward(ctx, input, roi,", "w, zsize = ctx.input_shape grad_input = _C.roi_align_rotated_3d_backward( grad_output, rois, spatial_scale,", "= _ROIAlignRotated3D.apply class ROIAlignRotated3D(nn.Module): def __init__(self, output_size, spatial_scale, sampling_ratio): '''", "xyz order is same as input0, yaw unit is rad,", "_pair(output_size) ctx.spatial_scale = spatial_scale ctx.sampling_ratio = sampling_ratio ctx.input_shape = input.size()", "import torch, math from torch import nn from torch.autograd import", "7] return output @staticmethod @once_differentiable def backward(ctx, grad_output): rois, =", "spatial_scale, output_size[0], output_size[1], output_size[2], bs, ch, h, w, zsize, sampling_ratio,", "use for bilinear_interpolate ''' super(ROIAlignRotated3D, self).__init__() self.output_size = output_size #", "= sampling_ratio # 2 def forward(self, input_s3d, rois_3d): ''' input0:", "tensor rois_3d: 3d box, xyz order is same as input0,", "bs, ch, h, w, zsize, sampling_ratio, ) return grad_input, None,", "theta] theta unit: degree, anti-clock wise is positive Note: the", "input, roi, output_size, spatial_scale, sampling_ratio): ctx.save_for_backward(roi) ctx.output_size = _pair(output_size) ctx.spatial_scale", "roi_align_rotated_3d( input_d3d, rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio ) return output def", "ch, h, w, zsize, sampling_ratio, ) return grad_input, None, None,", "input and rois is different. 
<reponame>picwoon/As_built_BIM
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch, math
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d

import _C


class _ROIAlignRotated3D(Function):
    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        ctx.save_for_backward(roi)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        # input: [4, 256, 304, 200, 7]
        # roi: [171, 8]
        # spatial_scale: 0.25
        # output_size: [7,7,7]
        # sampling_ratio: 2
        output = _C.roi_align_rotated_3d_forward(
            input, roi, spatial_scale, output_size[0], output_size[1],
            output_size[2], sampling_ratio
        )  # [171, 256, 7, 7]
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        rois, = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sampling_ratio
        bs, ch, h, w, zsize = ctx.input_shape
        grad_input = _C.roi_align_rotated_3d_backward(
            grad_output,
            rois,
            spatial_scale,
            output_size[0],
            output_size[1],
            output_size[2],
            bs,
            ch,
            h,
            w,
            zsize,
            sampling_ratio,
        )
        return grad_input, None, None, None, None


roi_align_rotated_3d = _ROIAlignRotated3D.apply


class ROIAlignRotated3D(nn.Module):
    def __init__(self, output_size, spatial_scale, sampling_ratio):
        '''
        output_size: [pooled_height, pooled_width]
        spatial_scale: size_of_map/size_of_original_image
        sampling_ratio: how many points to use for bilinear_interpolate
        '''
        super(ROIAlignRotated3D, self).__init__()
        self.output_size = output_size        # (7,7,7)
        self.spatial_scale = spatial_scale    # 0.25
        self.sampling_ratio = sampling_ratio  # 2

    def forward(self, input_s3d, rois_3d):
        '''
        input0: sparse 3d tensor
        rois_3d: 3d box, xyz order is same as input0, yaw unit is rad,
            anti-clock wise is positive
        input: [batch_size, feature, h, w]
        rois: [n,5] [batch_ind, center_w, center_h, roi_width, roi_height, theta]
            theta unit: degree, anti-clock wise is positive
        Note: the order of w and h inside of input and rois is different.
        '''
        input_d3d = sparse_3d_to_dense_2d(input_s3d)
        output = roi_align_rotated_3d(
            input_d3d, rois_3d, self.output_size, self.spatial_scale, self.sampling_ratio
        )
        return output

    def __repr__(self):
        tmpstr = self.__class__.__name__ + "("
        tmpstr += "output_size=" + str(self.output_size)
        tmpstr += ", spatial_scale=" + str(self.spatial_scale)
        tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
        tmpstr += ")"
        return tmpstr
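# ---------------------------------------------------------------------------
# Editor's note (not part of the original repository): the classes above follow
# the standard custom-autograd pattern -- a torch.autograd.Function exposing a
# forward/backward pair through `.apply`, wrapped by an nn.Module. The minimal,
# self-contained sketch below only illustrates that wiring; the names `_ToyScale`
# and `ToyScale` are hypothetical and no compiled `_C` kernel is involved.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _ToyScale(Function):
        @staticmethod
        def forward(ctx, input, scale):
            ctx.scale = scale  # stash non-tensor args on ctx, like spatial_scale above
            return input * scale

        @staticmethod
        @once_differentiable
        def backward(ctx, grad_output):
            # one gradient slot per forward argument; None for the non-tensor arg
            return grad_output * ctx.scale, None

    toy_scale = _ToyScale.apply

    class ToyScale(nn.Module):
        def __init__(self, scale):
            super(ToyScale, self).__init__()
            self.scale = scale

        def forward(self, x):
            return toy_scale(x, self.scale)

    x = torch.randn(2, 3, requires_grad=True)
    ToyScale(0.25)(x).sum().backward()
    print(x.grad)  # every entry equals 0.25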
[ "if (dictChars is not None) and (chars == True): #", "!= 0: yield x_batch, y_batch def getChunkType(tok, idxToTag): tagName =", "\"ERROR: Can not find file {}.\".format(filename) super(ParrotIOError, self).__init__(message) # Class", "default and chunkType is not None: # Add a chunk.", "(chunkType, chunkStart, chunkEnd) Example: seq = [4, 5, 0, 3]", "charIDs, word # word id return word return f def", "= set() with open(filename, encoding='utf-8') as f: for line in", "set() for words, _ in dataset: for word in words:", "wordID = dictionary[word] embeddings[wordID] = np.asarray(embedding) np.savez_compressed(trimmedFilename, embeddings=embeddings) # store", "- 1: f.write(\"{}\\n\".format(word)) else: f.write(word) def loadDictionary(filename): try: d =", "seq[:maxLength] + [padtok]*max(maxLength - len(seq), 0) sequencePadded += [seq_] sequenceLength", "data: if len(x_batch) == minibatchSize: yield x_batch, y_batch x_batch, y_batch", "elif tok != default: tokChunkClass, tokChunkType = getChunkType(tok, idxToTag) if", "for (x, y) in data: if len(x_batch) == minibatchSize: yield", "[] chunkType, chunkStart = None, None for i, tok in", "enumerate(f): word = word.strip() d[word] = idx except IOError: raise", "is None: chunkType, chunkStart = tokChunkType, i elif tokChunkType !=", "or tokChunkClass == \"B\": chunk = (chunkType, chunkStart, i) chunks.append(chunk)", "= None def __iter__(self): niter = 0 with open(self.filename, encoding='utf-8')", "\"\"\"Given a sequence of tags, group entities and their position", "4)] \"\"\" default = tags[NONE] idxToTag = {idx: tag for", "word = word.lower() if word.isdigit(): word = NUM # word", "None and niter > self.maxIter: break yield words, tags words,", "dictWords, dictTags def getCharDictionary(dataset): dictChar = set() for words, _", "minibatchSize): x_batch, y_batch = [], [] for (x, y) in", "[], [] for line in f: line = line.strip() #", "open(gloveFilename, encoding='utf-8') as f: for line in f: line =", "[] else: ls = line.split(' ') word, tag = ls[0],ls[-1]", "+ start of a chunk! elif tok != default: tokChunkClass,", "1 if self.maxIter is not None and niter > self.maxIter:", "def getProcessingWord(dictWords=None, dictChars=None, lowercase=False, chars=False, allowUNK=True): def f(word): # char", "sp, sl = _padSequences(seq, padtok, maxLengthWord) sequencePadded += [sp] sequenceLength", "that takes a tag as input self.maxIter = maxIter #", "0: niter += 1 if self.maxIter is not None and", "chunkStart, i) chunks.append(chunk) chunkType, chunkStart = None, None # End", "1: maxLength = max(map(lambda x : len(x), sequences)) sequencePadded, sequenceLength", "file {}.\".format(filename) super(ParrotIOError, self).__init__(message) # Class that iterates over CoNLL", "dictChar.update(word) return dictChar #filename - path wo file with vectors", "and chunkType is not None: # Add a chunk. chunk", "if i != len(dictionary) - 1: f.write(\"{}\\n\".format(word)) else: f.write(word) def", "[4, 4, 0, 0, ...] 
sequence of labels tags: dict[\"O\"]", "str(len(dictGlove)) +\" tokens\") return dictGlove def saveDictionary(dictionary, filename): printLog(\"SAVE\") with", "True): charIDs = [] for char in word: if (char", "= np.zeros([len(dictionary), dim]) with open(gloveFilename, encoding='utf-8') as f: for line", "dim]) with open(gloveFilename, encoding='utf-8') as f: for line in f:", "and end if (len(line) == 0 or line.startswith(\"-DOCSTART-\")): if len(words)", "word.strip() d[word] = idx except IOError: raise ParrotIOError(filename) return d", "word = dictWords[word] elif allowUNK: word = dictWords[UNK] else: raise", "seq in sequences: seq = list(seq) seq_ = seq[:maxLength] +", "IOError: raise ParrotIOError(filename) def getProcessingWord(dictWords=None, dictChars=None, lowercase=False, chars=False, allowUNK=True): def", "len(x), seq)) for seq in sequences]) sequencePadded, sequenceLength = [],", "y_batch = [], [] if type(x[0]) == tuple: x =", "(dictChars is not None) and (chars == True): charIDs =", "self.processingTag = processingTag # function that takes a tag as", "# end condition if chunkType is not None: chunk =", "niter > self.maxIter: break yield words, tags words, tags =", "def _padSequences(sequences, padtok, maxLength): sequencePadded, sequenceLength = [], [] for", "embedding = [float(x) for x in line[1:]] #glove coords wordID", "+\" tokens\") return dictGlove def saveDictionary(dictionary, filename): printLog(\"SAVE\") with open(filename,", "sequenceLength += [sl] maxLengthSentence = max(map(lambda x : len(x), sequences))", "that iterates over CoNLL Dataset class CoNLLDataset(object): def __init__(self, filename,", "for idx, word in enumerate(f): word = word.strip() d[word] =", "in sequences]) sequencePadded, sequenceLength = [], [] for seq in", "# word id return word return f def _padSequences(sequences, padtok,", "yield self.length = None def __iter__(self): niter = 0 with", "their position Args: seq: [4, 4, 0, 0, ...] sequence", "spaces in start and end if (len(line) == 0 or", "return data[\"embeddings\"] except IOError: raise ParrotIOError(filename) def getProcessingWord(dictWords=None, dictChars=None, lowercase=False,", "return self.length #Create a dictionary from dataset def getDictionary(datasets): printLog(\"Building", "dictGlove = set() with open(filename, encoding='utf-8') as f: for line", "chunkStart, chunkEnd) Example: seq = [4, 5, 0, 3] tags", "x_batch, y_batch = [], [] if type(x[0]) == tuple: x", "encoding='utf-8') as f: for i, word in enumerate(dictionary): if i", "not None) and (chars == True): charIDs = [] for", "y_batch += [y] if len(x_batch) != 0: yield x_batch, y_batch", "of sentences to yield self.length = None def __iter__(self): niter", "np import os from .logger import printLog UNK = \"$UNK$\"", "[] for (x, y) in data: if len(x_batch) == minibatchSize:", "seq: [4, 4, 0, 0, ...] sequence of labels tags:", "line.split(' ') word, tag = ls[0],ls[-1] if self.processingWord is not", "= processingTag # function that takes a tag as input", "[y] if len(x_batch) != 0: yield x_batch, y_batch def getChunkType(tok,", "= (chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart = tokChunkType, i", "tokChunkClass == \"B\": chunk = (chunkType, chunkStart, i) chunks.append(chunk) chunkType,", "i) chunks.append(chunk) chunkType, chunkStart = tokChunkType, i else: pass #", "chunk + start of a chunk! 
elif tok != default:", "tokChunkType != chunkType or tokChunkClass == \"B\": chunk = (chunkType,", "y) in data: if len(x_batch) == minibatchSize: yield x_batch, y_batch", "= [4, 5, 0, 3] tags = {\"B-PER\": 4, \"I-PER\":", "if nlevels == 1: maxLength = max(map(lambda x : len(x),", "[padtok]*maxLengthWord, maxLengthSentence) sequenceLength, _ = _padSequences(sequenceLength, 0, maxLengthSentence) return sequencePadded,", "3} result = [(\"PER\", 0, 2), (\"LOC\", 3, 4)] \"\"\"", "ids and word id return charIDs, word # word id", "words, tags words, tags = [], [] else: ls =", "None, None # End of a chunk + start of", "tags[NONE] idxToTag = {idx: tag for tag, idx in tags.items()}", "Args: seq: [4, 4, 0, 0, ...] sequence of labels", "_padSequences(seq, padtok, maxLengthWord) sequencePadded += [sp] sequenceLength += [sl] maxLengthSentence", "raise ParrotIOError(filename) def getProcessingWord(dictWords=None, dictChars=None, lowercase=False, chars=False, allowUNK=True): def f(word):", "x: len(x), seq)) for seq in sequences]) sequencePadded, sequenceLength =", "None: # Add a chunk. chunk = (chunkType, chunkStart, i)", "0 or line.startswith(\"-DOCSTART-\")): if len(words) != 0: niter += 1", "gloveFilename, trimmedFilename, dim): embeddings = np.zeros([len(dictionary), dim]) with open(gloveFilename, encoding='utf-8')", "minibatchSize: yield x_batch, y_batch x_batch, y_batch = [], [] if", "in f: line = line.strip().split(' ') word = line[0] if", "is not None) and (chars == True): charIDs = []", "# End of a chunk 1 if tok == default", "np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove matrix def getCompactGloveVectors(filename): try: with", "printLog(\"SAVE\") with open(filename, \"w\", encoding='utf-8') as f: for i, word", "def f(word): # char ids for word if (dictChars is", "len(x_batch) == minibatchSize: yield x_batch, y_batch x_batch, y_batch = [],", "[(\"PER\", 0, 2), (\"LOC\", 3, 4)] \"\"\" default = tags[NONE]", "word in dictionary: embedding = [float(x) for x in line[1:]]", "- len(seq), 0) sequencePadded += [seq_] sequenceLength += [min(len(seq), maxLength)]", "# Add a chunk. 
chunk = (chunkType, chunkStart, i) chunks.append(chunk)", "= tags[NONE] idxToTag = {idx: tag for tag, idx in", "try: with np.load(filename) as data: return data[\"embeddings\"] except IOError: raise", "[] if type(x[0]) == tuple: x = zip(*x) x_batch +=", "in tags.items()} chunks = [] chunkType, chunkStart = None, None", "# all words are same length sp, sl = _padSequences(seq,", "list(seq) seq_ = seq[:maxLength] + [padtok]*max(maxLength - len(seq), 0) sequencePadded", "= processingWord # function that takes a word as input", "dataset: for word in words: dictChar.update(word) return dictChar #filename -", "for word if (dictChars is not None) and (chars ==", "maxLength = max(map(lambda x : len(x), sequences)) sequencePadded, sequenceLength =", "if self.processingWord is not None: word = self.processingWord(word) if self.processingTag", "printLog(\"Building dictionary\") dictGlove = set() with open(filename, encoding='utf-8') as f:", "type(x[0]) == tuple: x = zip(*x) x_batch += [x] y_batch", "lowercase: word = word.lower() if word.isdigit(): word = NUM #", "np.load(filename) as data: return data[\"embeddings\"] except IOError: raise ParrotIOError(filename) def", "== 1: maxLength = max(map(lambda x : len(x), sequences)) sequencePadded,", "_ = _padSequences(sequenceLength, 0, maxLengthSentence) return sequencePadded, sequenceLength def minibatches(data,", "0, 2), (\"LOC\", 3, 4)] \"\"\" default = tags[NONE] idxToTag", "sequencePadded += [sp] sequenceLength += [sl] maxLengthSentence = max(map(lambda x", "of a chunk + start of a chunk! elif tok", "tokChunkType = getChunkType(tok, idxToTag) if chunkType is None: chunkType, chunkStart", "dictTags = set() for dataset in datasets: for words, tags", "= line.strip() # delete spaces in start and end if", "= seq[:maxLength] + [padtok]*max(maxLength - len(seq), 0) sequencePadded += [seq_]", "input self.maxIter = maxIter # max number of sentences to", "line.strip().split(' ')[0] dictGlove.add(word) printLog(\"DONE: \"+ str(len(dictGlove)) +\" tokens\") return dictGlove", "in line[1:]] #glove coords wordID = dictionary[word] embeddings[wordID] = np.asarray(embedding)", "+ str(len(dictWords)) + \" size\") return dictWords, dictTags def getCharDictionary(dataset):", "= [], [] for seq in sequences: # all words", "chunk = (chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart = tokChunkType,", "tok != default: tokChunkClass, tokChunkType = getChunkType(tok, idxToTag) if chunkType", "chunkType, chunkStart = tokChunkType, i elif tokChunkType != chunkType or", "tags: dict[\"O\"] = 4 Returns: list of (chunkType, chunkStart, chunkEnd)", "as np import os from .logger import printLog UNK =", "dictionary from dataset def getDictionary(datasets): printLog(\"Building dictionary: \") dictWords =", "= idx except IOError: raise ParrotIOError(filename) return d def exportCompactGloveVectors(dictionary,", "of (chunkType, chunkStart, chunkEnd) Example: seq = [4, 5, 0,", "set() with open(filename, encoding='utf-8') as f: for line in f:", "line = line.strip() # delete spaces in start and end", "as f: words, tags = [], [] for line in", "with open(filename, encoding='utf-8') as f: for line in f: word", "allowUNK=True): def f(word): # char ids for word if (dictChars", "words, tags = [], [] else: ls = line.split(' ')", "def __init__(self, filename): message = \"ERROR: Can not find file", "None: chunkType, chunkStart = tokChunkType, i elif tokChunkType != chunkType", "dictionary\") dictGlove = set() with open(filename, encoding='utf-8') as f: for", "2), (\"LOC\", 3, 4)] 
\"\"\" default = tags[NONE] idxToTag =", "encoding='utf-8') as f: for line in f: word = line.strip().split('", "tuple: x = zip(*x) x_batch += [x] y_batch += [y]", "= self.processingTag(tag) words += [word] tags += [tag] def __len__(self):", "yield x_batch, y_batch def getChunkType(tok, idxToTag): tagName = idxToTag[tok] tagClass", "= line.strip().split(' ')[0] dictGlove.add(word) printLog(\"DONE: \"+ str(len(dictGlove)) +\" tokens\") return", "ls = line.split(' ') word, tag = ls[0],ls[-1] if self.processingWord", "tags = [], [] for line in f: line =", "= [], [] for seq in sequences: seq = list(seq)", "for line in f: line = line.strip() # delete spaces", "[], [] for seq in sequences: seq = list(seq) seq_", "4, \"I-PER\": 5, \"B-LOC\": 3} result = [(\"PER\", 0, 2),", "len(x_batch) != 0: yield x_batch, y_batch def getChunkType(tok, idxToTag): tagName", "dictWords[word] elif allowUNK: word = dictWords[UNK] else: raise Exception(\"Unknow tag.\")", "dictWords[UNK] else: raise Exception(\"Unknow tag.\") if (dictChars is not None)", "self.filename = filename self.processingWord = processingWord # function that takes", "return dictChar #filename - path wo file with vectors def", "dict[\"O\"] = 4 Returns: list of (chunkType, chunkStart, chunkEnd) Example:", "in sequences: # all words are same length sp, sl", "in words: dictChar.update(word) return dictChar #filename - path wo file", "yield x_batch, y_batch x_batch, y_batch = [], [] if type(x[0])", "f(word): # char ids for word if (dictChars is not", "5, \"B-LOC\": 3} result = [(\"PER\", 0, 2), (\"LOC\", 3,", "> self.maxIter: break yield words, tags words, tags = [],", "line.strip().split(' ') word = line[0] if word in dictionary: embedding", "self.maxIter = maxIter # max number of sentences to yield", "ls[0],ls[-1] if self.processingWord is not None: word = self.processingWord(word) if", "None # End of a chunk + start of a", "words, tags = [], [] for line in f: line", "x_batch, y_batch def getChunkType(tok, idxToTag): tagName = idxToTag[tok] tagClass =", "sequenceLength = [], [] for seq in sequences: seq =", "# function that takes a tag as input self.maxIter =", "getCompactGloveVectors(filename): try: with np.load(filename) as data: return data[\"embeddings\"] except IOError:", "takes a word as input self.processingTag = processingTag # function", "tagClass = tagName.split('-')[0] tagType = tagName.split('-')[-1] return tagClass, tagType def", "End of a chunk 1 if tok == default and", "= word.lower() if word.isdigit(): word = NUM # word id", "size\") return dictWords, dictTags def getCharDictionary(dataset): dictChar = set() for", "_ in self: self.length += 1 return self.length #Create a", "if len(x_batch) != 0: yield x_batch, y_batch def getChunkType(tok, idxToTag):", "= 4 Returns: list of (chunkType, chunkStart, chunkEnd) Example: seq", "are same length sp, sl = _padSequences(seq, padtok, maxLengthWord) sequencePadded", "words += [word] tags += [tag] def __len__(self): if self.length", "max(map(lambda x : len(x), sequences)) sequencePadded, _ = _padSequences(sequencePadded, [padtok]*maxLengthWord,", "filename): message = \"ERROR: Can not find file {}.\".format(filename) super(ParrotIOError,", "+= 1 return self.length #Create a dictionary from dataset def", "of a chunk 1 if tok == default and chunkType", "embeddings[wordID] = np.asarray(embedding) np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove matrix def", "line[1:]] #glove coords wordID = dictionary[word] embeddings[wordID] = np.asarray(embedding) 
np.savez_compressed(trimmedFilename,", "d[word] = idx except IOError: raise ParrotIOError(filename) return d def", "# chars ids and word id return charIDs, word #", "data[\"embeddings\"] except IOError: raise ParrotIOError(filename) def getProcessingWord(dictWords=None, dictChars=None, lowercase=False, chars=False,", "import numpy as np import os from .logger import printLog", "path wo file with vectors def getGloveDictionary(filename): printLog(\"Building dictionary\") dictGlove", "if (dictWords is not None): if word in dictWords: word", "Exception(\"Unknow tag.\") if (dictChars is not None) and (chars ==", "glove matrix def getCompactGloveVectors(filename): try: with np.load(filename) as data: return", "not None: word = self.processingWord(word) if self.processingTag is not None:", "line[0] if word in dictionary: embedding = [float(x) for x", "0 for _ in self: self.length += 1 return self.length", "y_batch def getChunkType(tok, idxToTag): tagName = idxToTag[tok] tagClass = tagName.split('-')[0]", "encoding='utf-8') as f: for line in f: line = line.strip().split('", "idxToTag[tok] tagClass = tagName.split('-')[0] tagType = tagName.split('-')[-1] return tagClass, tagType", "ids for word if (dictChars is not None) and (chars", "+= [y] if len(x_batch) != 0: yield x_batch, y_batch def", "np.asarray(embedding) np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove matrix def getCompactGloveVectors(filename): try:", "in dictWords: word = dictWords[word] elif allowUNK: word = dictWords[UNK]", "in dictionary: embedding = [float(x) for x in line[1:]] #glove", "- path wo file with vectors def getGloveDictionary(filename): printLog(\"Building dictionary\")", "y_batch = [], [] for (x, y) in data: if", "dictionary[word] embeddings[wordID] = np.asarray(embedding) np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove matrix", "seq)) for seq in sequences]) sequencePadded, sequenceLength = [], []", "with open(gloveFilename, encoding='utf-8') as f: for line in f: line", "!= default: tokChunkClass, tokChunkType = getChunkType(tok, idxToTag) if chunkType is", "padtok, maxLengthWord) sequencePadded += [sp] sequenceLength += [sl] maxLengthSentence =", "default = tags[NONE] idxToTag = {idx: tag for tag, idx", "is not None and niter > self.maxIter: break yield words,", "= (chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart = None, None", "find file {}.\".format(filename) super(ParrotIOError, self).__init__(message) # Class that iterates over", "[tag] def __len__(self): if self.length is None: self.length = 0", "allowUNK: word = dictWords[UNK] else: raise Exception(\"Unknow tag.\") if (dictChars", "None def __iter__(self): niter = 0 with open(self.filename, encoding='utf-8') as", "maxIter # max number of sentences to yield self.length =", "= maxIter # max number of sentences to yield self.length", "if (dictChars is not None) and (chars == True): charIDs", "tags): \"\"\"Given a sequence of tags, group entities and their", "\" size\") return dictWords, dictTags def getCharDictionary(dataset): dictChar = set()", "sequencePadded += [seq_] sequenceLength += [min(len(seq), maxLength)] # all sublist", "_ in dataset: for word in words: dictChar.update(word) return dictChar", "= \"$UNK$\" NUM = \"$NUM$\" NONE = \"O\" class ParrotIOError(Exception):", "getProcessingWord(dictWords=None, dictChars=None, lowercase=False, chars=False, allowUNK=True): def f(word): # char ids", "minibatches(data, minibatchSize): x_batch, y_batch = [], [] for (x, y)", "for words, tags in 
dataset: dictWords.update(words) dictTags.update(tags) printLog(\"DONE: \" +", "a tag as input self.maxIter = maxIter # max number", "for word in words: dictChar.update(word) return dictChar #filename - path", "Example: seq = [4, 5, 0, 3] tags = {\"B-PER\":", "= _padSequences(sequences, padtok, maxLength) elif nlevels == 2: maxLengthWord =", "sequencePadded, sequenceLength = [], [] for seq in sequences: #", "default: tokChunkClass, tokChunkType = getChunkType(tok, idxToTag) if chunkType is None:", "to yield self.length = None def __iter__(self): niter = 0", "= _padSequences(sequenceLength, 0, maxLengthSentence) return sequencePadded, sequenceLength def minibatches(data, minibatchSize):", "+= [word] tags += [tag] def __len__(self): if self.length is", "= [], [] for (x, y) in data: if len(x_batch)", "# word id if (dictWords is not None): if word", "or line.startswith(\"-DOCSTART-\")): if len(words) != 0: niter += 1 if", "for line in f: word = line.strip().split(' ')[0] dictGlove.add(word) printLog(\"DONE:", "f: for i, word in enumerate(dictionary): if i != len(dictionary)", "function that takes a tag as input self.maxIter = maxIter", "= [float(x) for x in line[1:]] #glove coords wordID =", "sequencePadded, _ = _padSequences(sequencePadded, [padtok]*maxLengthWord, maxLengthSentence) sequenceLength, _ = _padSequences(sequenceLength,", "Dataset class CoNLLDataset(object): def __init__(self, filename, processingWord=None, processingTag=None, maxIter=None): self.filename", "(dictChars is not None) and (chars == True): # chars", "processingTag # function that takes a tag as input self.maxIter", "self.processingWord(word) if self.processingTag is not None: tag = self.processingTag(tag) words", "self.processingTag(tag) words += [word] tags += [tag] def __len__(self): if", "end if (len(line) == 0 or line.startswith(\"-DOCSTART-\")): if len(words) !=", "self: self.length += 1 return self.length #Create a dictionary from", "as data: return data[\"embeddings\"] except IOError: raise ParrotIOError(filename) def getProcessingWord(dictWords=None,", "break yield words, tags words, tags = [], [] else:", "filename, processingWord=None, processingTag=None, maxIter=None): self.filename = filename self.processingWord = processingWord", "zip(*x) x_batch += [x] y_batch += [y] if len(x_batch) !=", "def getChunkType(tok, idxToTag): tagName = idxToTag[tok] tagClass = tagName.split('-')[0] tagType", "word, tag = ls[0],ls[-1] if self.processingWord is not None: word", "in data: if len(x_batch) == minibatchSize: yield x_batch, y_batch x_batch,", "[] for seq in sequences: # all words are same", "# max number of sentences to yield self.length = None", "as f: for line in f: line = line.strip().split(' ')", "= NUM # word id if (dictWords is not None):", "words are same length sp, sl = _padSequences(seq, padtok, maxLengthWord)", "(\"LOC\", 3, 4)] \"\"\" default = tags[NONE] idxToTag = {idx:", "# End of a chunk + start of a chunk!", "_padSequences(sequenceLength, 0, maxLengthSentence) return sequencePadded, sequenceLength def minibatches(data, minibatchSize): x_batch,", "as input self.maxIter = maxIter # max number of sentences", "(chars == True): charIDs = [] for char in word:", "for _ in self: self.length += 1 return self.length #Create", "getGloveDictionary(filename): printLog(\"Building dictionary\") dictGlove = set() with open(filename, encoding='utf-8') as", "{idx: tag for tag, idx in tags.items()} chunks = []", "as f: for i, word in enumerate(dictionary): if i !=", "ParrotIOError(filename) return d def 
exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim): embeddings =", "= tagName.split('-')[-1] return tagClass, tagType def getChunks(seq, tags): \"\"\"Given a", "1 return self.length #Create a dictionary from dataset def getDictionary(datasets):", "= [] chunkType, chunkStart = None, None for i, tok", "= set() for dataset in datasets: for words, tags in", "encoding='utf-8') as f: for idx, word in enumerate(f): word =", "if self.maxIter is not None and niter > self.maxIter: break", "self.length = None def __iter__(self): niter = 0 with open(self.filename,", "#Create a dictionary from dataset def getDictionary(datasets): printLog(\"Building dictionary: \")", "= set() for words, _ in dataset: for word in", "= 0 with open(self.filename, encoding='utf-8') as f: words, tags =", "input self.processingTag = processingTag # function that takes a tag", "\"$UNK$\" NUM = \"$NUM$\" NONE = \"O\" class ParrotIOError(Exception): def", "f: for line in f: line = line.strip().split(' ') word", "except IOError: raise ParrotIOError(filename) return d def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename,", "= [] for char in word: if (char in dictChars):", "elif allowUNK: word = dictWords[UNK] else: raise Exception(\"Unknow tag.\") if", "Add a chunk. chunk = (chunkType, chunkStart, i) chunks.append(chunk) chunkType,", "import printLog UNK = \"$UNK$\" NUM = \"$NUM$\" NONE =", "not None and niter > self.maxIter: break yield words, tags", "line = line.strip().split(' ') word = line[0] if word in", "seq_ = seq[:maxLength] + [padtok]*max(maxLength - len(seq), 0) sequencePadded +=", "matrix def getCompactGloveVectors(filename): try: with np.load(filename) as data: return data[\"embeddings\"]", "id return charIDs, word # word id return word return", "is not None: tag = self.processingTag(tag) words += [word] tags", "dict() with open(filename, encoding='utf-8') as f: for idx, word in", "maxLength): sequencePadded, sequenceLength = [], [] for seq in sequences:", "None: tag = self.processingTag(tag) words += [word] tags += [tag]", "None for i, tok in enumerate(seq): # End of a", "i, word in enumerate(dictionary): if i != len(dictionary) - 1:", "as f: for idx, word in enumerate(f): word = word.strip()", "word in dictWords: word = dictWords[word] elif allowUNK: word =", "tag = ls[0],ls[-1] if self.processingWord is not None: word =", "None) and (chars == True): charIDs = [] for char", "tags in dataset: dictWords.update(words) dictTags.update(tags) printLog(\"DONE: \" + str(len(dictWords)) +", "list of (chunkType, chunkStart, chunkEnd) Example: seq = [4, 5,", "not None: tag = self.processingTag(tag) words += [word] tags +=", "return word return f def _padSequences(sequences, padtok, maxLength): sequencePadded, sequenceLength", "for dataset in datasets: for words, tags in dataset: dictWords.update(words)", "sequencePadded, sequenceLength = _padSequences(sequences, padtok, maxLength) elif nlevels == 2:", "maxLength)] # all sublist have same length return sequencePadded, sequenceLength", "# Class that iterates over CoNLL Dataset class CoNLLDataset(object): def", "idxToTag = {idx: tag for tag, idx in tags.items()} chunks", "iterates over CoNLL Dataset class CoNLLDataset(object): def __init__(self, filename, processingWord=None,", "[word] tags += [tag] def __len__(self): if self.length is None:", "f: for idx, word in enumerate(f): word = word.strip() d[word]", "getChunkType(tok, idxToTag): tagName = idxToTag[tok] tagClass = tagName.split('-')[0] tagType =", "if chunkType is None: 
chunkType, chunkStart = tokChunkType, i elif", "= \"O\" class ParrotIOError(Exception): def __init__(self, filename): message = \"ERROR:", "def minibatches(data, minibatchSize): x_batch, y_batch = [], [] for (x,", "x : len(x), sequences)) sequencePadded, _ = _padSequences(sequencePadded, [padtok]*maxLengthWord, maxLengthSentence)", "self.length #Create a dictionary from dataset def getDictionary(datasets): printLog(\"Building dictionary:", "== True): charIDs = [] for char in word: if", "self.maxIter: break yield words, tags words, tags = [], []", "None: self.length = 0 for _ in self: self.length +=", "padtok, maxLength) elif nlevels == 2: maxLengthWord = max([max(map(lambda x:", "dictionary: \") dictWords = set() dictTags = set() for dataset", "chunkStart = None, None # End of a chunk +", "exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim): embeddings = np.zeros([len(dictionary), dim]) with open(gloveFilename,", "\"\"\" default = tags[NONE] idxToTag = {idx: tag for tag,", "chunks = [] chunkType, chunkStart = None, None for i,", "id return word return f def _padSequences(sequences, padtok, maxLength): sequencePadded,", "0, 3] tags = {\"B-PER\": 4, \"I-PER\": 5, \"B-LOC\": 3}", "tag, idx in tags.items()} chunks = [] chunkType, chunkStart =", "+= [x] y_batch += [y] if len(x_batch) != 0: yield", "if (char in dictChars): charIDs.append(dictChars[char]) if lowercase: word = word.lower()", "f: line = line.strip().split(' ') word = line[0] if word", "tok in enumerate(seq): # End of a chunk 1 if", "\"I-PER\": 5, \"B-LOC\": 3} result = [(\"PER\", 0, 2), (\"LOC\",", "f: words, tags = [], [] for line in f:", "not None) and (chars == True): # chars ids and", "for x in line[1:]] #glove coords wordID = dictionary[word] embeddings[wordID]", "id if (dictWords is not None): if word in dictWords:", "+= 1 if self.maxIter is not None and niter >", "idxToTag): tagName = idxToTag[tok] tagClass = tagName.split('-')[0] tagType = tagName.split('-')[-1]", "maxLengthWord) sequencePadded += [sp] sequenceLength += [sl] maxLengthSentence = max(map(lambda", "for tag, idx in tags.items()} chunks = [] chunkType, chunkStart", "d = dict() with open(filename, encoding='utf-8') as f: for idx,", "chunks.append(chunk) chunkType, chunkStart = tokChunkType, i else: pass # end", "trimmedFilename, dim): embeddings = np.zeros([len(dictionary), dim]) with open(gloveFilename, encoding='utf-8') as", "dictGlove.add(word) printLog(\"DONE: \"+ str(len(dictGlove)) +\" tokens\") return dictGlove def saveDictionary(dictionary,", "Returns: list of (chunkType, chunkStart, chunkEnd) Example: seq = [4,", "chunkType is not None: # Add a chunk. 
chunk =", "def padSequences(sequences, padtok, nlevels=1): if nlevels == 1: maxLength =", "chunkStart, i) chunks.append(chunk) chunkType, chunkStart = tokChunkType, i else: pass", "tag = self.processingTag(tag) words += [word] tags += [tag] def", "__len__(self): if self.length is None: self.length = 0 for _", "#glove coords wordID = dictionary[word] embeddings[wordID] = np.asarray(embedding) np.savez_compressed(trimmedFilename, embeddings=embeddings)", "seq = list(seq) seq_ = seq[:maxLength] + [padtok]*max(maxLength - len(seq),", "len(seq), 0) sequencePadded += [seq_] sequenceLength += [min(len(seq), maxLength)] #", "\"O\" class ParrotIOError(Exception): def __init__(self, filename): message = \"ERROR: Can", "dictChars): charIDs.append(dictChars[char]) if lowercase: word = word.lower() if word.isdigit(): word", "length return sequencePadded, sequenceLength def padSequences(sequences, padtok, nlevels=1): if nlevels", "line in f: line = line.strip() # delete spaces in", "with np.load(filename) as data: return data[\"embeddings\"] except IOError: raise ParrotIOError(filename)", "store glove matrix def getCompactGloveVectors(filename): try: with np.load(filename) as data:", "is not None) and (chars == True): # chars ids", "= word.strip() d[word] = idx except IOError: raise ParrotIOError(filename) return", "seq in sequences: # all words are same length sp,", "else: pass # end condition if chunkType is not None:", "printLog UNK = \"$UNK$\" NUM = \"$NUM$\" NONE = \"O\"", "(chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart = None, None #", "+= [seq_] sequenceLength += [min(len(seq), maxLength)] # all sublist have", "and their position Args: seq: [4, 4, 0, 0, ...]", "sequence of tags, group entities and their position Args: seq:", "f: word = line.strip().split(' ')[0] dictGlove.add(word) printLog(\"DONE: \"+ str(len(dictGlove)) +\"", "[] for seq in sequences: seq = list(seq) seq_ =", "[sl] maxLengthSentence = max(map(lambda x : len(x), sequences)) sequencePadded, _", "all sublist have same length return sequencePadded, sequenceLength def padSequences(sequences,", "except IOError: raise ParrotIOError(filename) def getProcessingWord(dictWords=None, dictChars=None, lowercase=False, chars=False, allowUNK=True):", "tagName.split('-')[-1] return tagClass, tagType def getChunks(seq, tags): \"\"\"Given a sequence", "idxToTag) if chunkType is None: chunkType, chunkStart = tokChunkType, i", "dictChar #filename - path wo file with vectors def getGloveDictionary(filename):", "(chars == True): # chars ids and word id return", "Class that iterates over CoNLL Dataset class CoNLLDataset(object): def __init__(self,", "file with vectors def getGloveDictionary(filename): printLog(\"Building dictionary\") dictGlove = set()", "in enumerate(dictionary): if i != len(dictionary) - 1: f.write(\"{}\\n\".format(word)) else:", "a sequence of tags, group entities and their position Args:", "tagType = tagName.split('-')[-1] return tagClass, tagType def getChunks(seq, tags): \"\"\"Given", "word.isdigit(): word = NUM # word id if (dictWords is", "[] for char in word: if (char in dictChars): charIDs.append(dictChars[char])", "sequences: # all words are same length sp, sl =", "def loadDictionary(filename): try: d = dict() with open(filename, encoding='utf-8') as", "open(filename, \"w\", encoding='utf-8') as f: for i, word in enumerate(dictionary):", "y_batch x_batch, y_batch = [], [] if type(x[0]) == tuple:", "ParrotIOError(Exception): def __init__(self, filename): message = \"ERROR: Can not find", "(char in 
dictChars): charIDs.append(dictChars[char]) if lowercase: word = word.lower() if", "__init__(self, filename, processingWord=None, processingTag=None, maxIter=None): self.filename = filename self.processingWord =", "self.length is None: self.length = 0 for _ in self:", "+ [padtok]*max(maxLength - len(seq), 0) sequencePadded += [seq_] sequenceLength +=", "x in line[1:]] #glove coords wordID = dictionary[word] embeddings[wordID] =", "4, 0, 0, ...] sequence of labels tags: dict[\"O\"] =", "Can not find file {}.\".format(filename) super(ParrotIOError, self).__init__(message) # Class that", "over CoNLL Dataset class CoNLLDataset(object): def __init__(self, filename, processingWord=None, processingTag=None,", "else: ls = line.split(' ') word, tag = ls[0],ls[-1] if", "sequenceLength, _ = _padSequences(sequenceLength, 0, maxLengthSentence) return sequencePadded, sequenceLength def", "labels tags: dict[\"O\"] = 4 Returns: list of (chunkType, chunkStart,", "= [(\"PER\", 0, 2), (\"LOC\", 3, 4)] \"\"\" default =", "words, tags in dataset: dictWords.update(words) dictTags.update(tags) printLog(\"DONE: \" + str(len(dictWords))", "maxIter=None): self.filename = filename self.processingWord = processingWord # function that", "self).__init__(message) # Class that iterates over CoNLL Dataset class CoNLLDataset(object):", "with open(filename, encoding='utf-8') as f: for idx, word in enumerate(f):", "vectors def getGloveDictionary(filename): printLog(\"Building dictionary\") dictGlove = set() with open(filename,", "word = NUM # word id if (dictWords is not", ": len(x), sequences)) sequencePadded, sequenceLength = _padSequences(sequences, padtok, maxLength) elif", "chunk 1 if tok == default and chunkType is not", "for seq in sequences: seq = list(seq) seq_ = seq[:maxLength]", "(chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart = tokChunkType, i else:", "f: line = line.strip() # delete spaces in start and", "open(filename, encoding='utf-8') as f: for line in f: word =", "with vectors def getGloveDictionary(filename): printLog(\"Building dictionary\") dictGlove = set() with", "3] tags = {\"B-PER\": 4, \"I-PER\": 5, \"B-LOC\": 3} result", "_padSequences(sequencePadded, [padtok]*maxLengthWord, maxLengthSentence) sequenceLength, _ = _padSequences(sequenceLength, 0, maxLengthSentence) return", "= max(map(lambda x : len(x), sequences)) sequencePadded, sequenceLength = _padSequences(sequences,", "char in word: if (char in dictChars): charIDs.append(dictChars[char]) if lowercase:", "dictWords = set() dictTags = set() for dataset in datasets:", "# store glove matrix def getCompactGloveVectors(filename): try: with np.load(filename) as", "tag.\") if (dictChars is not None) and (chars == True):", "all words are same length sp, sl = _padSequences(seq, padtok,", "\"w\", encoding='utf-8') as f: for i, word in enumerate(dictionary): if", "getCharDictionary(dataset): dictChar = set() for words, _ in dataset: for", "words: dictChar.update(word) return dictChar #filename - path wo file with", "charIDs.append(dictChars[char]) if lowercase: word = word.lower() if word.isdigit(): word =", "None): if word in dictWords: word = dictWords[word] elif allowUNK:", "self.length = 0 for _ in self: self.length += 1", "[], [] for (x, y) in data: if len(x_batch) ==", "open(self.filename, encoding='utf-8') as f: words, tags = [], [] for", "in f: line = line.strip() # delete spaces in start", "a word as input self.processingTag = processingTag # function that", "== 2: maxLengthWord = max([max(map(lambda x: len(x), seq)) for 
seq", "position Args: seq: [4, 4, 0, 0, ...] sequence of", "!= 0: niter += 1 if self.maxIter is not None", "= _padSequences(seq, padtok, maxLengthWord) sequencePadded += [sp] sequenceLength += [sl]", "in enumerate(seq): # End of a chunk 1 if tok", "not None: # Add a chunk. chunk = (chunkType, chunkStart,", "self.processingWord = processingWord # function that takes a word as", "chunks.append(chunk) chunkType, chunkStart = None, None # End of a", "for i, word in enumerate(dictionary): if i != len(dictionary) -", "is not None): if word in dictWords: word = dictWords[word]", "if word in dictionary: embedding = [float(x) for x in", "word id if (dictWords is not None): if word in", "sequencePadded, sequenceLength def padSequences(sequences, padtok, nlevels=1): if nlevels == 1:", "printLog(\"DONE: \" + str(len(dictWords)) + \" size\") return dictWords, dictTags", "(len(line) == 0 or line.startswith(\"-DOCSTART-\")): if len(words) != 0: niter", "= None, None for i, tok in enumerate(seq): # End", "word id return charIDs, word # word id return word", "maxLengthWord = max([max(map(lambda x: len(x), seq)) for seq in sequences])", "chars=False, allowUNK=True): def f(word): # char ids for word if", "same length sp, sl = _padSequences(seq, padtok, maxLengthWord) sequencePadded +=", "= {\"B-PER\": 4, \"I-PER\": 5, \"B-LOC\": 3} result = [(\"PER\",", "lowercase=False, chars=False, allowUNK=True): def f(word): # char ids for word", "try: d = dict() with open(filename, encoding='utf-8') as f: for", "getChunks(seq, tags): \"\"\"Given a sequence of tags, group entities and", "NUM # word id if (dictWords is not None): if", "= tokChunkType, i elif tokChunkType != chunkType or tokChunkClass ==", "x_batch += [x] y_batch += [y] if len(x_batch) != 0:", "= [], [] else: ls = line.split(' ') word, tag", "\" + str(len(dictWords)) + \" size\") return dictWords, dictTags def", "f: for line in f: word = line.strip().split(' ')[0] dictGlove.add(word)", "IOError: raise ParrotIOError(filename) return d def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim):", "coords wordID = dictionary[word] embeddings[wordID] = np.asarray(embedding) np.savez_compressed(trimmedFilename, embeddings=embeddings) #", "= idxToTag[tok] tagClass = tagName.split('-')[0] tagType = tagName.split('-')[-1] return tagClass,", "return dictWords, dictTags def getCharDictionary(dataset): dictChar = set() for words,", "tok == default and chunkType is not None: # Add", "UNK = \"$UNK$\" NUM = \"$NUM$\" NONE = \"O\" class", "__init__(self, filename): message = \"ERROR: Can not find file {}.\".format(filename)", "processingWord=None, processingTag=None, maxIter=None): self.filename = filename self.processingWord = processingWord #", "tag as input self.maxIter = maxIter # max number of", "return sequencePadded, sequenceLength def minibatches(data, minibatchSize): x_batch, y_batch = [],", "group entities and their position Args: seq: [4, 4, 0,", "line.strip() # delete spaces in start and end if (len(line)", "and niter > self.maxIter: break yield words, tags words, tags", "not None: chunk = (chunkType, chunkStart, len(seq)) chunks.append(chunk) return chunks", "numpy as np import os from .logger import printLog UNK", "1: f.write(\"{}\\n\".format(word)) else: f.write(word) def loadDictionary(filename): try: d = dict()", "return tagClass, tagType def getChunks(seq, tags): \"\"\"Given a sequence of", "_padSequences(sequences, padtok, maxLength) elif nlevels == 2: maxLengthWord = max([max(map(lambda", "elif tokChunkType != chunkType or 
tokChunkClass == \"B\": chunk =", "from dataset def getDictionary(datasets): printLog(\"Building dictionary: \") dictWords = set()", "maxLengthSentence) sequenceLength, _ = _padSequences(sequenceLength, 0, maxLengthSentence) return sequencePadded, sequenceLength", "= self.processingWord(word) if self.processingTag is not None: tag = self.processingTag(tag)", "+ \" size\") return dictWords, dictTags def getCharDictionary(dataset): dictChar =", "sl = _padSequences(seq, padtok, maxLengthWord) sequencePadded += [sp] sequenceLength +=", "!= len(dictionary) - 1: f.write(\"{}\\n\".format(word)) else: f.write(word) def loadDictionary(filename): try:", "0, 0, ...] sequence of labels tags: dict[\"O\"] = 4", "tag for tag, idx in tags.items()} chunks = [] chunkType,", ": len(x), sequences)) sequencePadded, _ = _padSequences(sequencePadded, [padtok]*maxLengthWord, maxLengthSentence) sequenceLength,", "def getCharDictionary(dataset): dictChar = set() for words, _ in dataset:", "x : len(x), sequences)) sequencePadded, sequenceLength = _padSequences(sequences, padtok, maxLength)", "== tuple: x = zip(*x) x_batch += [x] y_batch +=", "True): # chars ids and word id return charIDs, word", "CoNLL Dataset class CoNLLDataset(object): def __init__(self, filename, processingWord=None, processingTag=None, maxIter=None):", "maxLength) elif nlevels == 2: maxLengthWord = max([max(map(lambda x: len(x),", "in f: word = line.strip().split(' ')[0] dictGlove.add(word) printLog(\"DONE: \"+ str(len(dictGlove))", "takes a tag as input self.maxIter = maxIter # max", "printLog(\"DONE: \"+ str(len(dictGlove)) +\" tokens\") return dictGlove def saveDictionary(dictionary, filename):", "None) and (chars == True): # chars ids and word", "def __iter__(self): niter = 0 with open(self.filename, encoding='utf-8') as f:", "filename self.processingWord = processingWord # function that takes a word", "= line.split(' ') word, tag = ls[0],ls[-1] if self.processingWord is", "0, ...] sequence of labels tags: dict[\"O\"] = 4 Returns:", "f def _padSequences(sequences, padtok, maxLength): sequencePadded, sequenceLength = [], []", "processingTag=None, maxIter=None): self.filename = filename self.processingWord = processingWord # function", "x = zip(*x) x_batch += [x] y_batch += [y] if", "d def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim): embeddings = np.zeros([len(dictionary), dim])", "if word.isdigit(): word = NUM # word id if (dictWords", "tags += [tag] def __len__(self): if self.length is None: self.length", "a chunk! 
elif tok != default: tokChunkClass, tokChunkType = getChunkType(tok,", "length sp, sl = _padSequences(seq, padtok, maxLengthWord) sequencePadded += [sp]", "len(words) != 0: niter += 1 if self.maxIter is not", "word = self.processingWord(word) if self.processingTag is not None: tag =", "0 with open(self.filename, encoding='utf-8') as f: words, tags = [],", "word in enumerate(f): word = word.strip() d[word] = idx except", "tagType def getChunks(seq, tags): \"\"\"Given a sequence of tags, group", "filename): printLog(\"SAVE\") with open(filename, \"w\", encoding='utf-8') as f: for i,", "[], [] else: ls = line.split(' ') word, tag =", "{\"B-PER\": 4, \"I-PER\": 5, \"B-LOC\": 3} result = [(\"PER\", 0,", "= {idx: tag for tag, idx in tags.items()} chunks =", "idx in tags.items()} chunks = [] chunkType, chunkStart = None,", "= None, None # End of a chunk + start", "as f: for line in f: word = line.strip().split(' ')[0]", "in sequences: seq = list(seq) seq_ = seq[:maxLength] + [padtok]*max(maxLength", "dataset: dictWords.update(words) dictTags.update(tags) printLog(\"DONE: \" + str(len(dictWords)) + \" size\")", "and word id return charIDs, word # word id return", "for seq in sequences]) sequencePadded, sequenceLength = [], [] for", "word id return word return f def _padSequences(sequences, padtok, maxLength):", "self.length += 1 return self.length #Create a dictionary from dataset", "padSequences(sequences, padtok, nlevels=1): if nlevels == 1: maxLength = max(map(lambda", "= zip(*x) x_batch += [x] y_batch += [y] if len(x_batch)", "if tok == default and chunkType is not None: #", "entities and their position Args: seq: [4, 4, 0, 0,", "5, 0, 3] tags = {\"B-PER\": 4, \"I-PER\": 5, \"B-LOC\":", "class ParrotIOError(Exception): def __init__(self, filename): message = \"ERROR: Can not", "== default and chunkType is not None: # Add a", "is not None: word = self.processingWord(word) if self.processingTag is not", "with open(self.filename, encoding='utf-8') as f: words, tags = [], []", "= set() dictTags = set() for dataset in datasets: for", "enumerate(seq): # End of a chunk 1 if tok ==", "[] for line in f: line = line.strip() # delete", "nlevels=1): if nlevels == 1: maxLength = max(map(lambda x :", "max number of sentences to yield self.length = None def", "in enumerate(f): word = word.strip() d[word] = idx except IOError:", "as input self.processingTag = processingTag # function that takes a", "i elif tokChunkType != chunkType or tokChunkClass == \"B\": chunk", "CoNLLDataset(object): def __init__(self, filename, processingWord=None, processingTag=None, maxIter=None): self.filename = filename", "embeddings=embeddings) # store glove matrix def getCompactGloveVectors(filename): try: with np.load(filename)", "message = \"ERROR: Can not find file {}.\".format(filename) super(ParrotIOError, self).__init__(message)", "printLog(\"Building dictionary: \") dictWords = set() dictTags = set() for", "word = line.strip().split(' ')[0] dictGlove.add(word) printLog(\"DONE: \"+ str(len(dictGlove)) +\" tokens\")", "os from .logger import printLog UNK = \"$UNK$\" NUM =", "idx except IOError: raise ParrotIOError(filename) return d def exportCompactGloveVectors(dictionary, gloveFilename,", "word = dictWords[UNK] else: raise Exception(\"Unknow tag.\") if (dictChars is", "function that takes a word as input self.processingTag = processingTag", "word = line[0] if word in dictionary: embedding = [float(x)", "niter = 0 with open(self.filename, encoding='utf-8') as f: words, tags", "chunkType, chunkStart = None, None # End of a 
chunk", "word if (dictChars is not None) and (chars == True):", "self.processingTag is not None: tag = self.processingTag(tag) words += [word]", "dim): embeddings = np.zeros([len(dictionary), dim]) with open(gloveFilename, encoding='utf-8') as f:", "= dictionary[word] embeddings[wordID] = np.asarray(embedding) np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove", "super(ParrotIOError, self).__init__(message) # Class that iterates over CoNLL Dataset class", "chars ids and word id return charIDs, word # word", "__iter__(self): niter = 0 with open(self.filename, encoding='utf-8') as f: words,", "return charIDs, word # word id return word return f", "is None: self.length = 0 for _ in self: self.length", "datasets: for words, tags in dataset: dictWords.update(words) dictTags.update(tags) printLog(\"DONE: \"", "\"B-LOC\": 3} result = [(\"PER\", 0, 2), (\"LOC\", 3, 4)]", "\") dictWords = set() dictTags = set() for dataset in", "= getChunkType(tok, idxToTag) if chunkType is None: chunkType, chunkStart =", "# function that takes a word as input self.processingTag =", "that takes a word as input self.processingTag = processingTag #", "niter += 1 if self.maxIter is not None and niter", "for words, _ in dataset: for word in words: dictChar.update(word)", "maxLengthSentence) return sequencePadded, sequenceLength def minibatches(data, minibatchSize): x_batch, y_batch =", "return d def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim): embeddings = np.zeros([len(dictionary),", "[], [] if type(x[0]) == tuple: x = zip(*x) x_batch", "== 0 or line.startswith(\"-DOCSTART-\")): if len(words) != 0: niter +=", "chunkType is not None: chunk = (chunkType, chunkStart, len(seq)) chunks.append(chunk)", "sequenceLength def minibatches(data, minibatchSize): x_batch, y_batch = [], [] for", "chunkType or tokChunkClass == \"B\": chunk = (chunkType, chunkStart, i)", "line in f: word = line.strip().split(' ')[0] dictGlove.add(word) printLog(\"DONE: \"+", "= dictWords[word] elif allowUNK: word = dictWords[UNK] else: raise Exception(\"Unknow", "_padSequences(sequences, padtok, maxLength): sequencePadded, sequenceLength = [], [] for seq", "tags words, tags = [], [] else: ls = line.split('", "a chunk. 
chunk = (chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart", "chunk = (chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart = None,", "def saveDictionary(dictionary, filename): printLog(\"SAVE\") with open(filename, \"w\", encoding='utf-8') as f:", "sequences)) sequencePadded, _ = _padSequences(sequencePadded, [padtok]*maxLengthWord, maxLengthSentence) sequenceLength, _ =", "')[0] dictGlove.add(word) printLog(\"DONE: \"+ str(len(dictGlove)) +\" tokens\") return dictGlove def", "def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim): embeddings = np.zeros([len(dictionary), dim]) with", "self.maxIter is not None and niter > self.maxIter: break yield", "x_batch, y_batch = [], [] for (x, y) in data:", "{}.\".format(filename) super(ParrotIOError, self).__init__(message) # Class that iterates over CoNLL Dataset", "from .logger import printLog UNK = \"$UNK$\" NUM = \"$NUM$\"", "') word, tag = ls[0],ls[-1] if self.processingWord is not None:", "for char in word: if (char in dictChars): charIDs.append(dictChars[char]) if", "f.write(word) def loadDictionary(filename): try: d = dict() with open(filename, encoding='utf-8')", "str(len(dictWords)) + \" size\") return dictWords, dictTags def getCharDictionary(dataset): dictChar", "idx, word in enumerate(f): word = word.strip() d[word] = idx", "= _padSequences(sequencePadded, [padtok]*maxLengthWord, maxLengthSentence) sequenceLength, _ = _padSequences(sequenceLength, 0, maxLengthSentence)", "nlevels == 1: maxLength = max(map(lambda x : len(x), sequences))", "= ls[0],ls[-1] if self.processingWord is not None: word = self.processingWord(word)", "+= [tag] def __len__(self): if self.length is None: self.length =", "chunkStart = tokChunkType, i elif tokChunkType != chunkType or tokChunkClass", "set() dictTags = set() for dataset in datasets: for words,", "not None): if word in dictWords: word = dictWords[word] elif", "chunkEnd) Example: seq = [4, 5, 0, 3] tags =", "for i, tok in enumerate(seq): # End of a chunk", "encoding='utf-8') as f: words, tags = [], [] for line", "# delete spaces in start and end if (len(line) ==", "sequenceLength += [min(len(seq), maxLength)] # all sublist have same length", "max(map(lambda x : len(x), sequences)) sequencePadded, sequenceLength = _padSequences(sequences, padtok,", "= tagName.split('-')[0] tagType = tagName.split('-')[-1] return tagClass, tagType def getChunks(seq,", "chunk! elif tok != default: tokChunkClass, tokChunkType = getChunkType(tok, idxToTag)", "\"+ str(len(dictGlove)) +\" tokens\") return dictGlove def saveDictionary(dictionary, filename): printLog(\"SAVE\")", "[float(x) for x in line[1:]] #glove coords wordID = dictionary[word]", "chunkType, chunkStart = None, None for i, tok in enumerate(seq):", "3, 4)] \"\"\" default = tags[NONE] idxToTag = {idx: tag", "= 0 for _ in self: self.length += 1 return", "sequences)) sequencePadded, sequenceLength = _padSequences(sequences, padtok, maxLength) elif nlevels ==", "chunkStart = tokChunkType, i else: pass # end condition if", "chunkType is None: chunkType, chunkStart = tokChunkType, i elif tokChunkType", "word in words: dictChar.update(word) return dictChar #filename - path wo", "NONE = \"O\" class ParrotIOError(Exception): def __init__(self, filename): message =", "2: maxLengthWord = max([max(map(lambda x: len(x), seq)) for seq in", "seq in sequences]) sequencePadded, sequenceLength = [], [] for seq", "a chunk + start of a chunk! 
elif tok !=", "tags.items()} chunks = [] chunkType, chunkStart = None, None for", "+= [sl] maxLengthSentence = max(map(lambda x : len(x), sequences)) sequencePadded,", "of tags, group entities and their position Args: seq: [4,", "return sequencePadded, sequenceLength def padSequences(sequences, padtok, nlevels=1): if nlevels ==", "is not None: chunk = (chunkType, chunkStart, len(seq)) chunks.append(chunk) return", "i else: pass # end condition if chunkType is not", "charIDs = [] for char in word: if (char in", "len(dictionary) - 1: f.write(\"{}\\n\".format(word)) else: f.write(word) def loadDictionary(filename): try: d", "= dictWords[UNK] else: raise Exception(\"Unknow tag.\") if (dictChars is not", "x_batch, y_batch x_batch, y_batch = [], [] if type(x[0]) ==", "(dictWords is not None): if word in dictWords: word =", "[x] y_batch += [y] if len(x_batch) != 0: yield x_batch,", "def getCompactGloveVectors(filename): try: with np.load(filename) as data: return data[\"embeddings\"] except", "word: if (char in dictChars): charIDs.append(dictChars[char]) if lowercase: word =", "same length return sequencePadded, sequenceLength def padSequences(sequences, padtok, nlevels=1): if", "set() for dataset in datasets: for words, tags in dataset:", "self.processingWord is not None: word = self.processingWord(word) if self.processingTag is", "sequenceLength = _padSequences(sequences, padtok, maxLength) elif nlevels == 2: maxLengthWord", "is not None: # Add a chunk. chunk = (chunkType,", "tagName.split('-')[0] tagType = tagName.split('-')[-1] return tagClass, tagType def getChunks(seq, tags):", "\"B\": chunk = (chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart =", "else: raise Exception(\"Unknow tag.\") if (dictChars is not None) and", "number of sentences to yield self.length = None def __iter__(self):", "!= chunkType or tokChunkClass == \"B\": chunk = (chunkType, chunkStart,", "dataset in datasets: for words, tags in dataset: dictWords.update(words) dictTags.update(tags)", "[seq_] sequenceLength += [min(len(seq), maxLength)] # all sublist have same", "tagClass, tagType def getChunks(seq, tags): \"\"\"Given a sequence of tags,", "words, _ in dataset: for word in words: dictChar.update(word) return", "if lowercase: word = word.lower() if word.isdigit(): word = NUM", "start and end if (len(line) == 0 or line.startswith(\"-DOCSTART-\")): if", "= line.strip().split(' ') word = line[0] if word in dictionary:", "0, maxLengthSentence) return sequencePadded, sequenceLength def minibatches(data, minibatchSize): x_batch, y_batch", "condition if chunkType is not None: chunk = (chunkType, chunkStart,", "= max([max(map(lambda x: len(x), seq)) for seq in sequences]) sequencePadded,", "= list(seq) seq_ = seq[:maxLength] + [padtok]*max(maxLength - len(seq), 0)", "<reponame>J-CITY/METADATA-EXTRACTOR<filename>src/model/utils/utils.py import numpy as np import os from .logger import", "raise Exception(\"Unknow tag.\") if (dictChars is not None) and (chars", "= np.asarray(embedding) np.savez_compressed(trimmedFilename, embeddings=embeddings) # store glove matrix def getCompactGloveVectors(filename):", "seq = [4, 5, 0, 3] tags = {\"B-PER\": 4,", "== \"B\": chunk = (chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart", "') word = line[0] if word in dictionary: embedding =", "tokChunkType, i else: pass # end condition if chunkType is", "tags = [], [] else: ls = line.split(' ') word,", "else: f.write(word) def loadDictionary(filename): try: d = dict() with open(filename,", "result = [(\"PER\", 
0, 2), (\"LOC\", 3, 4)] \"\"\" default", "word in enumerate(dictionary): if i != len(dictionary) - 1: f.write(\"{}\\n\".format(word))", "tags, group entities and their position Args: seq: [4, 4,", "\"$NUM$\" NONE = \"O\" class ParrotIOError(Exception): def __init__(self, filename): message", "i) chunks.append(chunk) chunkType, chunkStart = None, None # End of", "in dataset: dictWords.update(words) dictTags.update(tags) printLog(\"DONE: \" + str(len(dictWords)) + \"", "return f def _padSequences(sequences, padtok, maxLength): sequencePadded, sequenceLength = [],", "and (chars == True): charIDs = [] for char in", "word # word id return word return f def _padSequences(sequences,", "in self: self.length += 1 return self.length #Create a dictionary", "tags = {\"B-PER\": 4, \"I-PER\": 5, \"B-LOC\": 3} result =", "dictTags def getCharDictionary(dataset): dictChar = set() for words, _ in", "in dictChars): charIDs.append(dictChars[char]) if lowercase: word = word.lower() if word.isdigit():", "def getDictionary(datasets): printLog(\"Building dictionary: \") dictWords = set() dictTags =", "sequencePadded, sequenceLength = [], [] for seq in sequences: seq", "sublist have same length return sequencePadded, sequenceLength def padSequences(sequences, padtok,", "[4, 5, 0, 3] tags = {\"B-PER\": 4, \"I-PER\": 5,", "line.startswith(\"-DOCSTART-\")): if len(words) != 0: niter += 1 if self.maxIter", "return dictGlove def saveDictionary(dictionary, filename): printLog(\"SAVE\") with open(filename, \"w\", encoding='utf-8')", "a chunk 1 if tok == default and chunkType is", "[sp] sequenceLength += [sl] maxLengthSentence = max(map(lambda x : len(x),", "word as input self.processingTag = processingTag # function that takes", "with open(filename, \"w\", encoding='utf-8') as f: for i, word in", "chunk. 
chunk = (chunkType, chunkStart, i) chunks.append(chunk) chunkType, chunkStart =", "None: word = self.processingWord(word) if self.processingTag is not None: tag", "raise ParrotIOError(filename) return d def exportCompactGloveVectors(dictionary, gloveFilename, trimmedFilename, dim): embeddings", "[], [] for seq in sequences: # all words are", "chunkStart = None, None for i, tok in enumerate(seq): #", "enumerate(dictionary): if i != len(dictionary) - 1: f.write(\"{}\\n\".format(word)) else: f.write(word)", "if word in dictWords: word = dictWords[word] elif allowUNK: word", "delete spaces in start and end if (len(line) == 0", "if self.length is None: self.length = 0 for _ in", "dictChar = set() for words, _ in dataset: for word", "in word: if (char in dictChars): charIDs.append(dictChars[char]) if lowercase: word", "word return f def _padSequences(sequences, padtok, maxLength): sequencePadded, sequenceLength =", "len(x), sequences)) sequencePadded, sequenceLength = _padSequences(sequences, padtok, maxLength) elif nlevels", "maxLengthSentence = max(map(lambda x : len(x), sequences)) sequencePadded, _ =", "getDictionary(datasets): printLog(\"Building dictionary: \") dictWords = set() dictTags = set()", "import os from .logger import printLog UNK = \"$UNK$\" NUM", "getChunkType(tok, idxToTag) if chunkType is None: chunkType, chunkStart = tokChunkType,", "= \"$NUM$\" NONE = \"O\" class ParrotIOError(Exception): def __init__(self, filename):", "tokens\") return dictGlove def saveDictionary(dictionary, filename): printLog(\"SAVE\") with open(filename, \"w\",", "= \"ERROR: Can not find file {}.\".format(filename) super(ParrotIOError, self).__init__(message) #", "# char ids for word if (dictChars is not None)", "sentences to yield self.length = None def __iter__(self): niter =", "dictTags.update(tags) printLog(\"DONE: \" + str(len(dictWords)) + \" size\") return dictWords,", "= [], [] if type(x[0]) == tuple: x = zip(*x)", "NUM = \"$NUM$\" NONE = \"O\" class ParrotIOError(Exception): def __init__(self,", "in start and end if (len(line) == 0 or line.startswith(\"-DOCSTART-\")):", "dictChars=None, lowercase=False, chars=False, allowUNK=True): def f(word): # char ids for", "= filename self.processingWord = processingWord # function that takes a", "_ = _padSequences(sequencePadded, [padtok]*maxLengthWord, maxLengthSentence) sequenceLength, _ = _padSequences(sequenceLength, 0,", "0: yield x_batch, y_batch def getChunkType(tok, idxToTag): tagName = idxToTag[tok]", "i != len(dictionary) - 1: f.write(\"{}\\n\".format(word)) else: f.write(word) def loadDictionary(filename):", "0) sequencePadded += [seq_] sequenceLength += [min(len(seq), maxLength)] # all", "max([max(map(lambda x: len(x), seq)) for seq in sequences]) sequencePadded, sequenceLength", "= [], [] for line in f: line = line.strip()", "sequenceLength def padSequences(sequences, padtok, nlevels=1): if nlevels == 1: maxLength", "= line[0] if word in dictionary: embedding = [float(x) for", "sequence of labels tags: dict[\"O\"] = 4 Returns: list of", "loadDictionary(filename): try: d = dict() with open(filename, encoding='utf-8') as f:", "len(x), sequences)) sequencePadded, _ = _padSequences(sequencePadded, [padtok]*maxLengthWord, maxLengthSentence) sequenceLength, _", "have same length return sequencePadded, sequenceLength def padSequences(sequences, padtok, nlevels=1):", "= dict() with open(filename, encoding='utf-8') as f: for idx, word", "None, None for i, tok in enumerate(seq): # End of", "pass # end condition if chunkType is not None: chunk", 
"[padtok]*max(maxLength - len(seq), 0) sequencePadded += [seq_] sequenceLength += [min(len(seq),", "if len(words) != 0: niter += 1 if self.maxIter is", "wo file with vectors def getGloveDictionary(filename): printLog(\"Building dictionary\") dictGlove =", "sequences]) sequencePadded, sequenceLength = [], [] for seq in sequences:", "tagName = idxToTag[tok] tagClass = tagName.split('-')[0] tagType = tagName.split('-')[-1] return", "ParrotIOError(filename) def getProcessingWord(dictWords=None, dictChars=None, lowercase=False, chars=False, allowUNK=True): def f(word): #", "[min(len(seq), maxLength)] # all sublist have same length return sequencePadded,", "elif nlevels == 2: maxLengthWord = max([max(map(lambda x: len(x), seq))", "word.lower() if word.isdigit(): word = NUM # word id if", "if len(x_batch) == minibatchSize: yield x_batch, y_batch x_batch, y_batch =", "processingWord # function that takes a word as input self.processingTag", "def __init__(self, filename, processingWord=None, processingTag=None, maxIter=None): self.filename = filename self.processingWord", "if type(x[0]) == tuple: x = zip(*x) x_batch += [x]", "a dictionary from dataset def getDictionary(datasets): printLog(\"Building dictionary: \") dictWords", "(x, y) in data: if len(x_batch) == minibatchSize: yield x_batch,", "dataset def getDictionary(datasets): printLog(\"Building dictionary: \") dictWords = set() dictTags", "not find file {}.\".format(filename) super(ParrotIOError, self).__init__(message) # Class that iterates", "sequenceLength = [], [] for seq in sequences: # all", "for seq in sequences: # all words are same length", "if chunkType is not None: chunk = (chunkType, chunkStart, len(seq))", "in dataset: for word in words: dictChar.update(word) return dictChar #filename", "#filename - path wo file with vectors def getGloveDictionary(filename): printLog(\"Building", "data: return data[\"embeddings\"] except IOError: raise ParrotIOError(filename) def getProcessingWord(dictWords=None, dictChars=None,", "nlevels == 2: maxLengthWord = max([max(map(lambda x: len(x), seq)) for", "end condition if chunkType is not None: chunk = (chunkType,", "= tokChunkType, i else: pass # end condition if chunkType", "for line in f: line = line.strip().split(' ') word =", "char ids for word if (dictChars is not None) and", "4 Returns: list of (chunkType, chunkStart, chunkEnd) Example: seq =", "if self.processingTag is not None: tag = self.processingTag(tag) words +=", "1 if tok == default and chunkType is not None:", "padtok, nlevels=1): if nlevels == 1: maxLength = max(map(lambda x", "f.write(\"{}\\n\".format(word)) else: f.write(word) def loadDictionary(filename): try: d = dict() with", "End of a chunk + start of a chunk! elif", "saveDictionary(dictionary, filename): printLog(\"SAVE\") with open(filename, \"w\", encoding='utf-8') as f: for", "== True): # chars ids and word id return charIDs,", "i, tok in enumerate(seq): # End of a chunk 1", "dictGlove def saveDictionary(dictionary, filename): printLog(\"SAVE\") with open(filename, \"w\", encoding='utf-8') as", "+= [min(len(seq), maxLength)] # all sublist have same length return", "def getChunks(seq, tags): \"\"\"Given a sequence of tags, group entities", "dictWords.update(words) dictTags.update(tags) printLog(\"DONE: \" + str(len(dictWords)) + \" size\") return", "def __len__(self): if self.length is None: self.length = 0 for", "embeddings = np.zeros([len(dictionary), dim]) with open(gloveFilename, encoding='utf-8') as f: for", "start of a chunk! 
elif tok != default: tokChunkClass, tokChunkType", "sequencePadded, sequenceLength def minibatches(data, minibatchSize): x_batch, y_batch = [], []", "...] sequence of labels tags: dict[\"O\"] = 4 Returns: list", "tokChunkClass, tokChunkType = getChunkType(tok, idxToTag) if chunkType is None: chunkType,", "padtok, maxLength): sequencePadded, sequenceLength = [], [] for seq in", "line in f: line = line.strip().split(' ') word = line[0]", "tokChunkType, i elif tokChunkType != chunkType or tokChunkClass == \"B\":", ".logger import printLog UNK = \"$UNK$\" NUM = \"$NUM$\" NONE", "if (len(line) == 0 or line.startswith(\"-DOCSTART-\")): if len(words) != 0:", "of labels tags: dict[\"O\"] = 4 Returns: list of (chunkType,", "= max(map(lambda x : len(x), sequences)) sequencePadded, _ = _padSequences(sequencePadded,", "class CoNLLDataset(object): def __init__(self, filename, processingWord=None, processingTag=None, maxIter=None): self.filename =", "+= [sp] sequenceLength += [sl] maxLengthSentence = max(map(lambda x :", "yield words, tags words, tags = [], [] else: ls", "of a chunk! elif tok != default: tokChunkClass, tokChunkType =", "dictWords: word = dictWords[word] elif allowUNK: word = dictWords[UNK] else:", "dictionary: embedding = [float(x) for x in line[1:]] #glove coords", "# all sublist have same length return sequencePadded, sequenceLength def", "open(filename, encoding='utf-8') as f: for idx, word in enumerate(f): word", "def getGloveDictionary(filename): printLog(\"Building dictionary\") dictGlove = set() with open(filename, encoding='utf-8')", "word = word.strip() d[word] = idx except IOError: raise ParrotIOError(filename)", "== minibatchSize: yield x_batch, y_batch x_batch, y_batch = [], []", "chunkType, chunkStart = tokChunkType, i else: pass # end condition", "sequences: seq = list(seq) seq_ = seq[:maxLength] + [padtok]*max(maxLength -", "and (chars == True): # chars ids and word id", "np.zeros([len(dictionary), dim]) with open(gloveFilename, encoding='utf-8') as f: for line in", "in datasets: for words, tags in dataset: dictWords.update(words) dictTags.update(tags) printLog(\"DONE:" ]
[ "nox FILE_PATHS = [\"utils\", \"main.py\"] @nox.session def format(session): session.install(\"black\") session.run(\"black\",", "import nox FILE_PATHS = [\"utils\", \"main.py\"] @nox.session def format(session): session.install(\"black\")", "FILE_PATHS = [\"utils\", \"main.py\"] @nox.session def format(session): session.install(\"black\") session.run(\"black\", *FILE_PATHS)" ]
[ "pylint: disable=W0640 df = df.fillna(\"\") df.set_index(\" \", inplace=True) # Debug", "image = Image.open(imagefile) image = autocrop_image(image, 0) image.save(imagefile, \"PNG\", quality=100)", "] ] dindex = len(df.index) fig = df2img.plot_dataframe( df, fig_size=(800,", "[WSJ] Currencies\" embed = disnake.Embed(title=title, colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL,", "quality=100) image = disnake.File(imagefile) title = \"Economy: [WSJ] Currencies\" embed", "col, value in formats.items(): df[col] = df[col].map(lambda x: value.format(x)) #", "dindex = len(df.index) fig = df2img.plot_dataframe( df, fig_size=(800, (40 +", "ctx.send(embed=embed, file=image) except Exception as e: embed = disnake.Embed( title=\"ERROR", "= df[col].map(lambda x: value.format(x)) # pylint: disable=W0640 df = df.fillna(\"\")", "colour=cfg.COLOR, description=e, ) embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) await ctx.send(embed=embed, delete_after=30.0)", "= disnake.Embed( title=\"ERROR Economy: [WSJ] Currencies\", colour=cfg.COLOR, description=e, ) embed.set_author(", "autocrop_image(image, 0) image.save(imagefile, \"PNG\", quality=100) image = disnake.File(imagefile) title =", "discordbot.helpers import autocrop_image from gamestonk_terminal.economy import wsj_model async def currencies_command(ctx):", "formats = {\"Last\": \"{:.2f}\", \"Chng\": \"{:.2f}\", \"%Chng\": \"{:.2f}%\"} for col,", "filename=imagefile) image = Image.open(imagefile) image = autocrop_image(image, 0) image.save(imagefile, \"PNG\",", "df = df[ [ \"Last\", \"Chng\", \"%Chng\", ] ] dindex", "except Exception as e: embed = disnake.Embed( title=\"ERROR Economy: [WSJ]", "currencies_command(ctx): \"\"\"Currencies overview [Wall St. 
Journal]\"\"\" try: # Debug user", "as cfg from discordbot.config_discordbot import logger from discordbot.helpers import autocrop_image", "fig = df2img.plot_dataframe( df, fig_size=(800, (40 + (40 * dindex))),", "import discordbot.config_discordbot as cfg from discordbot.config_discordbot import logger from discordbot.helpers", "input if cfg.DEBUG: logger.debug(\"econ-currencies\") # Retrieve data df = wsj_model.global_currencies()", "df, fig_size=(800, (40 + (40 * dindex))), col_width=[8, 3, 3],", "height=35, ), template=\"plotly_dark\", font=dict( family=\"Consolas\", size=20, ), paper_bgcolor=\"rgba(0, 0, 0,", "try: # Debug user input if cfg.DEBUG: logger.debug(\"econ-currencies\") # Retrieve", "# Retrieve data df = wsj_model.global_currencies() df = pd.DataFrame.from_dict(df) #", "from gamestonk_terminal.economy import wsj_model async def currencies_command(ctx): \"\"\"Currencies overview [Wall", "\"{:.2f}\", \"Chng\": \"{:.2f}\", \"%Chng\": \"{:.2f}%\"} for col, value in formats.items():", "df2img.save_dataframe(fig=fig, filename=imagefile) image = Image.open(imagefile) image = autocrop_image(image, 0) image.save(imagefile,", "pandas as pd from PIL import Image import discordbot.config_discordbot as", "disnake import pandas as pd from PIL import Image import", "= pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"] = pd.to_numeric(df[\"%Chng\"].astype(float)) formats = {\"Last\": \"{:.2f}\", \"Chng\":", "x: value.format(x)) # pylint: disable=W0640 df = df.fillna(\"\") df.set_index(\" \",", "= \"econ-currencies.png\" df2img.save_dataframe(fig=fig, filename=imagefile) image = Image.open(imagefile) image = autocrop_image(image,", "embed = disnake.Embed( title=\"ERROR Economy: [WSJ] Currencies\", colour=cfg.COLOR, description=e, )", "title=\"ERROR Economy: [WSJ] Currencies\", colour=cfg.COLOR, description=e, ) embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL,", "import os import df2img import disnake import pandas as pd", "autocrop_image from gamestonk_terminal.economy import wsj_model async def currencies_command(ctx): \"\"\"Currencies overview", "0, 0, 0)\", ) imagefile = \"econ-currencies.png\" df2img.save_dataframe(fig=fig, filename=imagefile) image", "Economy: [WSJ] Currencies\", colour=cfg.COLOR, description=e, ) embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, )", "import logger from discordbot.helpers import autocrop_image from gamestonk_terminal.economy import wsj_model", "imagefile = \"econ-currencies.png\" df2img.save_dataframe(fig=fig, filename=imagefile) image = Image.open(imagefile) image =", "df.fillna(\"\") df.set_index(\" \", inplace=True) # Debug user output if cfg.DEBUG:", "inplace=True) # Debug user output if cfg.DEBUG: logger.debug(df.to_string()) df =", "colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) os.remove(imagefile) await ctx.send(embed=embed, file=image)", "available data found\") df[\"Last\"] = pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"] = pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"]", "import pandas as pd from PIL import Image import discordbot.config_discordbot", "\"{:.2f}\", \"%Chng\": \"{:.2f}%\"} for col, value in formats.items(): df[col] =", "name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) os.remove(imagefile) await ctx.send(embed=embed, file=image) except Exception as", "pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"] = pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"] = 
pd.to_numeric(df[\"%Chng\"].astype(float)) formats = {\"Last\":", "cfg from discordbot.config_discordbot import logger from discordbot.helpers import autocrop_image from", "= autocrop_image(image, 0) image.save(imagefile, \"PNG\", quality=100) image = disnake.File(imagefile) title", "as e: embed = disnake.Embed( title=\"ERROR Economy: [WSJ] Currencies\", colour=cfg.COLOR,", "title = \"Economy: [WSJ] Currencies\" embed = disnake.Embed(title=title, colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\")", "as pd from PIL import Image import discordbot.config_discordbot as cfg", "= df[ [ \"Last\", \"Chng\", \"%Chng\", ] ] dindex =", "\"Economy: [WSJ] Currencies\" embed = disnake.Embed(title=title, colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author( name=cfg.AUTHOR_NAME,", "\"Chng\": \"{:.2f}\", \"%Chng\": \"{:.2f}%\"} for col, value in formats.items(): df[col]", "import autocrop_image from gamestonk_terminal.economy import wsj_model async def currencies_command(ctx): \"\"\"Currencies", "embed = disnake.Embed(title=title, colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) os.remove(imagefile)", "= len(df.index) fig = df2img.plot_dataframe( df, fig_size=(800, (40 + (40", "Journal]\"\"\" try: # Debug user input if cfg.DEBUG: logger.debug(\"econ-currencies\") #", "from PIL import Image import discordbot.config_discordbot as cfg from discordbot.config_discordbot", "3, 3], tbl_cells=dict( align=\"left\", height=35, ), template=\"plotly_dark\", font=dict( family=\"Consolas\", size=20,", "# Check for argument if df.empty: raise Exception(\"No available data", "icon_url=cfg.AUTHOR_ICON_URL, ) os.remove(imagefile) await ctx.send(embed=embed, file=image) except Exception as e:", "col_width=[8, 3, 3], tbl_cells=dict( align=\"left\", height=35, ), template=\"plotly_dark\", font=dict( family=\"Consolas\",", "= \"Economy: [WSJ] Currencies\" embed = disnake.Embed(title=title, colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author(", "= disnake.Embed(title=title, colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) os.remove(imagefile) await", "\"%Chng\", ] ] dindex = len(df.index) fig = df2img.plot_dataframe( df,", "tbl_cells=dict( align=\"left\", height=35, ), template=\"plotly_dark\", font=dict( family=\"Consolas\", size=20, ), paper_bgcolor=\"rgba(0,", "= {\"Last\": \"{:.2f}\", \"Chng\": \"{:.2f}\", \"%Chng\": \"{:.2f}%\"} for col, value", "Currencies\" embed = disnake.Embed(title=title, colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, )", "Image.open(imagefile) image = autocrop_image(image, 0) image.save(imagefile, \"PNG\", quality=100) image =", "\"\"\"Currencies overview [Wall St. 
Journal]\"\"\" try: # Debug user input", "user input if cfg.DEBUG: logger.debug(\"econ-currencies\") # Retrieve data df =", "= wsj_model.global_currencies() df = pd.DataFrame.from_dict(df) # Check for argument if", "if df.empty: raise Exception(\"No available data found\") df[\"Last\"] = pd.to_numeric(df[\"Last\"].astype(float))", "\", inplace=True) # Debug user output if cfg.DEBUG: logger.debug(df.to_string()) df", "dindex))), col_width=[8, 3, 3], tbl_cells=dict( align=\"left\", height=35, ), template=\"plotly_dark\", font=dict(", "value.format(x)) # pylint: disable=W0640 df = df.fillna(\"\") df.set_index(\" \", inplace=True)", ") os.remove(imagefile) await ctx.send(embed=embed, file=image) except Exception as e: embed", "0, 0)\", ) imagefile = \"econ-currencies.png\" df2img.save_dataframe(fig=fig, filename=imagefile) image =", "discordbot.config_discordbot import logger from discordbot.helpers import autocrop_image from gamestonk_terminal.economy import", "from discordbot.helpers import autocrop_image from gamestonk_terminal.economy import wsj_model async def", "disnake.File(imagefile) title = \"Economy: [WSJ] Currencies\" embed = disnake.Embed(title=title, colour=cfg.COLOR)", "Check for argument if df.empty: raise Exception(\"No available data found\")", "raise Exception(\"No available data found\") df[\"Last\"] = pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"] =", "in formats.items(): df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640", "logger.debug(df.to_string()) df = df[ [ \"Last\", \"Chng\", \"%Chng\", ] ]", "3], tbl_cells=dict( align=\"left\", height=35, ), template=\"plotly_dark\", font=dict( family=\"Consolas\", size=20, ),", "(40 + (40 * dindex))), col_width=[8, 3, 3], tbl_cells=dict( align=\"left\",", "] dindex = len(df.index) fig = df2img.plot_dataframe( df, fig_size=(800, (40", "# pylint: disable=W0640 df = df.fillna(\"\") df.set_index(\" \", inplace=True) #", "[WSJ] Currencies\", colour=cfg.COLOR, description=e, ) embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) await", "for col, value in formats.items(): df[col] = df[col].map(lambda x: value.format(x))", "+ (40 * dindex))), col_width=[8, 3, 3], tbl_cells=dict( align=\"left\", height=35,", "template=\"plotly_dark\", font=dict( family=\"Consolas\", size=20, ), paper_bgcolor=\"rgba(0, 0, 0, 0)\", )", "df.empty: raise Exception(\"No available data found\") df[\"Last\"] = pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"]", "Exception(\"No available data found\") df[\"Last\"] = pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"] = pd.to_numeric(df[\"Chng\"].astype(float))", "df[ [ \"Last\", \"Chng\", \"%Chng\", ] ] dindex = len(df.index)", "), template=\"plotly_dark\", font=dict( family=\"Consolas\", size=20, ), paper_bgcolor=\"rgba(0, 0, 0, 0)\",", "df[\"Chng\"] = pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"] = pd.to_numeric(df[\"%Chng\"].astype(float)) formats = {\"Last\": \"{:.2f}\",", "* dindex))), col_width=[8, 3, 3], tbl_cells=dict( align=\"left\", height=35, ), template=\"plotly_dark\",", "cfg.DEBUG: logger.debug(\"econ-currencies\") # Retrieve data df = wsj_model.global_currencies() df =", "df2img.plot_dataframe( df, fig_size=(800, (40 + (40 * dindex))), col_width=[8, 3,", "# Debug user input if cfg.DEBUG: logger.debug(\"econ-currencies\") # Retrieve data", "for argument if df.empty: raise Exception(\"No available data found\") df[\"Last\"]", "df = df.fillna(\"\") df.set_index(\" \", inplace=True) # Debug user output", "\"%Chng\": \"{:.2f}%\"} for 
col, value in formats.items(): df[col] = df[col].map(lambda", "await ctx.send(embed=embed, file=image) except Exception as e: embed = disnake.Embed(", "overview [Wall St. Journal]\"\"\" try: # Debug user input if", "wsj_model.global_currencies() df = pd.DataFrame.from_dict(df) # Check for argument if df.empty:", "\"{:.2f}%\"} for col, value in formats.items(): df[col] = df[col].map(lambda x:", "pd from PIL import Image import discordbot.config_discordbot as cfg from", "df.set_index(\" \", inplace=True) # Debug user output if cfg.DEBUG: logger.debug(df.to_string())", "df[\"%Chng\"] = pd.to_numeric(df[\"%Chng\"].astype(float)) formats = {\"Last\": \"{:.2f}\", \"Chng\": \"{:.2f}\", \"%Chng\":", "embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) os.remove(imagefile) await ctx.send(embed=embed, file=image) except Exception", "Image import discordbot.config_discordbot as cfg from discordbot.config_discordbot import logger from", "pd.DataFrame.from_dict(df) # Check for argument if df.empty: raise Exception(\"No available", "df2img import disnake import pandas as pd from PIL import", "len(df.index) fig = df2img.plot_dataframe( df, fig_size=(800, (40 + (40 *", "paper_bgcolor=\"rgba(0, 0, 0, 0)\", ) imagefile = \"econ-currencies.png\" df2img.save_dataframe(fig=fig, filename=imagefile)", "image.save(imagefile, \"PNG\", quality=100) image = disnake.File(imagefile) title = \"Economy: [WSJ]", "Currencies\", colour=cfg.COLOR, description=e, ) embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) await ctx.send(embed=embed,", "data found\") df[\"Last\"] = pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"] = pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"] =", "found\") df[\"Last\"] = pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"] = pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"] = pd.to_numeric(df[\"%Chng\"].astype(float))", "gamestonk_terminal.economy import wsj_model async def currencies_command(ctx): \"\"\"Currencies overview [Wall St.", "data df = wsj_model.global_currencies() df = pd.DataFrame.from_dict(df) # Check for", "file=image) except Exception as e: embed = disnake.Embed( title=\"ERROR Economy:", "(40 * dindex))), col_width=[8, 3, 3], tbl_cells=dict( align=\"left\", height=35, ),", "# Debug user output if cfg.DEBUG: logger.debug(df.to_string()) df = df[", "if cfg.DEBUG: logger.debug(\"econ-currencies\") # Retrieve data df = wsj_model.global_currencies() df", "disnake.Embed( title=\"ERROR Economy: [WSJ] Currencies\", colour=cfg.COLOR, description=e, ) embed.set_author( name=cfg.AUTHOR_NAME,", "= df.fillna(\"\") df.set_index(\" \", inplace=True) # Debug user output if", "[Wall St. Journal]\"\"\" try: # Debug user input if cfg.DEBUG:", ") imagefile = \"econ-currencies.png\" df2img.save_dataframe(fig=fig, filename=imagefile) image = Image.open(imagefile) image", "align=\"left\", height=35, ), template=\"plotly_dark\", font=dict( family=\"Consolas\", size=20, ), paper_bgcolor=\"rgba(0, 0,", "Retrieve data df = wsj_model.global_currencies() df = pd.DataFrame.from_dict(df) # Check", "), paper_bgcolor=\"rgba(0, 0, 0, 0)\", ) imagefile = \"econ-currencies.png\" df2img.save_dataframe(fig=fig,", "{\"Last\": \"{:.2f}\", \"Chng\": \"{:.2f}\", \"%Chng\": \"{:.2f}%\"} for col, value in", "\"Last\", \"Chng\", \"%Chng\", ] ] dindex = len(df.index) fig =", "formats.items(): df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640 df", "import wsj_model async def currencies_command(ctx): \"\"\"Currencies overview [Wall St. 
Journal]\"\"\"", "df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640 df =", "df[col].map(lambda x: value.format(x)) # pylint: disable=W0640 df = df.fillna(\"\") df.set_index(\"", "PIL import Image import discordbot.config_discordbot as cfg from discordbot.config_discordbot import", "= disnake.File(imagefile) title = \"Economy: [WSJ] Currencies\" embed = disnake.Embed(title=title,", "df[\"Last\"] = pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"] = pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"] = pd.to_numeric(df[\"%Chng\"].astype(float)) formats", "0)\", ) imagefile = \"econ-currencies.png\" df2img.save_dataframe(fig=fig, filename=imagefile) image = Image.open(imagefile)", "import Image import discordbot.config_discordbot as cfg from discordbot.config_discordbot import logger", "logger.debug(\"econ-currencies\") # Retrieve data df = wsj_model.global_currencies() df = pd.DataFrame.from_dict(df)", "os.remove(imagefile) await ctx.send(embed=embed, file=image) except Exception as e: embed =", "Debug user input if cfg.DEBUG: logger.debug(\"econ-currencies\") # Retrieve data df", "image = autocrop_image(image, 0) image.save(imagefile, \"PNG\", quality=100) image = disnake.File(imagefile)", "argument if df.empty: raise Exception(\"No available data found\") df[\"Last\"] =", "family=\"Consolas\", size=20, ), paper_bgcolor=\"rgba(0, 0, 0, 0)\", ) imagefile =", "def currencies_command(ctx): \"\"\"Currencies overview [Wall St. Journal]\"\"\" try: # Debug", "St. Journal]\"\"\" try: # Debug user input if cfg.DEBUG: logger.debug(\"econ-currencies\")", "async def currencies_command(ctx): \"\"\"Currencies overview [Wall St. Journal]\"\"\" try: #", "disable=W0640 df = df.fillna(\"\") df.set_index(\" \", inplace=True) # Debug user", "= pd.to_numeric(df[\"Last\"].astype(float)) df[\"Chng\"] = pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"] = pd.to_numeric(df[\"%Chng\"].astype(float)) formats =", "user output if cfg.DEBUG: logger.debug(df.to_string()) df = df[ [ \"Last\",", "wsj_model async def currencies_command(ctx): \"\"\"Currencies overview [Wall St. 
Journal]\"\"\" try:", "font=dict( family=\"Consolas\", size=20, ), paper_bgcolor=\"rgba(0, 0, 0, 0)\", ) imagefile", "pd.to_numeric(df[\"Chng\"].astype(float)) df[\"%Chng\"] = pd.to_numeric(df[\"%Chng\"].astype(float)) formats = {\"Last\": \"{:.2f}\", \"Chng\": \"{:.2f}\",", "pd.to_numeric(df[\"%Chng\"].astype(float)) formats = {\"Last\": \"{:.2f}\", \"Chng\": \"{:.2f}\", \"%Chng\": \"{:.2f}%\"} for", "\"PNG\", quality=100) image = disnake.File(imagefile) title = \"Economy: [WSJ] Currencies\"", "os import df2img import disnake import pandas as pd from", "[ \"Last\", \"Chng\", \"%Chng\", ] ] dindex = len(df.index) fig", "df = pd.DataFrame.from_dict(df) # Check for argument if df.empty: raise", "if cfg.DEBUG: logger.debug(df.to_string()) df = df[ [ \"Last\", \"Chng\", \"%Chng\",", "import df2img import disnake import pandas as pd from PIL", "cfg.DEBUG: logger.debug(df.to_string()) df = df[ [ \"Last\", \"Chng\", \"%Chng\", ]", "import disnake import pandas as pd from PIL import Image", "Debug user output if cfg.DEBUG: logger.debug(df.to_string()) df = df[ [", "e: embed = disnake.Embed( title=\"ERROR Economy: [WSJ] Currencies\", colour=cfg.COLOR, description=e,", "= Image.open(imagefile) image = autocrop_image(image, 0) image.save(imagefile, \"PNG\", quality=100) image", "output if cfg.DEBUG: logger.debug(df.to_string()) df = df[ [ \"Last\", \"Chng\",", "value in formats.items(): df[col] = df[col].map(lambda x: value.format(x)) # pylint:", "0) image.save(imagefile, \"PNG\", quality=100) image = disnake.File(imagefile) title = \"Economy:", "image = disnake.File(imagefile) title = \"Economy: [WSJ] Currencies\" embed =", "df = wsj_model.global_currencies() df = pd.DataFrame.from_dict(df) # Check for argument", "fig_size=(800, (40 + (40 * dindex))), col_width=[8, 3, 3], tbl_cells=dict(", "disnake.Embed(title=title, colour=cfg.COLOR) embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) os.remove(imagefile) await ctx.send(embed=embed,", "embed.set_image(url=f\"attachment://{imagefile}\") embed.set_author( name=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) os.remove(imagefile) await ctx.send(embed=embed, file=image) except", "Exception as e: embed = disnake.Embed( title=\"ERROR Economy: [WSJ] Currencies\",", "logger from discordbot.helpers import autocrop_image from gamestonk_terminal.economy import wsj_model async", "\"econ-currencies.png\" df2img.save_dataframe(fig=fig, filename=imagefile) image = Image.open(imagefile) image = autocrop_image(image, 0)", "= df2img.plot_dataframe( df, fig_size=(800, (40 + (40 * dindex))), col_width=[8,", "size=20, ), paper_bgcolor=\"rgba(0, 0, 0, 0)\", ) imagefile = \"econ-currencies.png\"", "\"Chng\", \"%Chng\", ] ] dindex = len(df.index) fig = df2img.plot_dataframe(", "from discordbot.config_discordbot import logger from discordbot.helpers import autocrop_image from gamestonk_terminal.economy", "= pd.DataFrame.from_dict(df) # Check for argument if df.empty: raise Exception(\"No", "= pd.to_numeric(df[\"%Chng\"].astype(float)) formats = {\"Last\": \"{:.2f}\", \"Chng\": \"{:.2f}\", \"%Chng\": \"{:.2f}%\"}", "discordbot.config_discordbot as cfg from discordbot.config_discordbot import logger from discordbot.helpers import" ]
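A hedged wiring sketch (not part of the reconstructed module): currencies_command is a bare coroutine, so something has to register it with a bot. The prefix, command name, and Bot construction below are illustrative assumptions using the disnake commands extension.

    from disnake.ext import commands

    bot = commands.Bot(command_prefix="!")  # assumed prefix, illustrative only


    @bot.command(name="currencies")
    async def currencies(ctx):
        # Delegate to the reconstructed coroutine above.
        await currencies_command(ctx)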
[ "recipe_name[:-5] else: raise FileExistsError('File in layer recipes folder does not", "os import yaml import logging logger = logging.getLogger(__name__) class Configs:", "''' # the following model structures for recipes / layers", "Plugins.load() Plugins.hook('before_load', config=Configs) def load_recipe(data): name = data.get('name', 'default') if", "None)) class Layer(dict): def __init__(self, recipe, layer_data): self.recipe = recipe", "= Recipe(load_recipe) # add the recipe name based on the", "name in RECIPES: raise ValueError('Recipe with name {} already exist'.format(name))", "the following model structures for recipes / layers / queries", "the chain # for attributes. If not found in the", "# add the recipe name based on the file name", "def load_layers(self, layers): self.layers = {} for layer in layers:", "layers): self.layers = {} for layer in layers: self.layers[layer['name']] =", "@property def description(self): return self.get('description', 'no description provided') class Query(dict):", "'no description provided') class Query(dict): def __init__(self, layer, data): self.layer", "self.layer = layer super().__init__(data) def __getattr__(self, attr): return self.get(attr, getattr(self.layer,", "== '.yml': recipe_name = recipe_name[:-4] elif recipe_name[-5:] == '.yaml': recipe_name", "in layer recipes folder does not have a YAML extension:", "logging logger = logging.getLogger(__name__) class Configs: server = None recipes", "if name in RECIPES: raise ValueError('Recipe with name {} already", "YAML extension: {0}'.format(recipe_configs)) with open(recipe_configs) as r_c: load_recipe = yaml.load(r_c.read())", "'.yml': recipe_name = recipe_name[:-4] elif recipe_name[-5:] == '.yaml': recipe_name =", "/ layers / queries allows searching up the chain #", "class Recipe(dict): def __init__(self, data): super().__init__(data) self.load_layers(data['layers']) def load_layers(self, layers):", "Layer(self, layer) def __getattr__(self, attr): return self.get(attr, Configs.server.get(attr, None)) class", "class Query(dict): def __init__(self, layer, data): self.layer = layer super().__init__(data)", "level then it will check the server configs. 
class Recipe(dict):", "If not found in the root recipes level then it", "name = data.get('name', 'default') if name in RECIPES: raise ValueError('Recipe", "for recipes / layers / queries allows searching up the", "tilejson query cls.recipes[recipe_name].name = recipe_name logger.info('Adding layer: {0}'.format(recipe_name)) ''' Plugins.load()", "already exist'.format(name)) data['name'] = name RECIPES[name] = Recipe(data) if len(RECIPES)", "recipe name based on the file name # this is", "based on the file name # this is needed by", "open(server_configs) as s_c: cls.server = yaml.load(s_c.read()) @classmethod def init_layer_recipes(cls, recipe_configs):", "and name != 'default': RECIPES['default'] = RECIPES[data['name']] for recipe in", "FileExistsError('File in layer recipes folder does not have a YAML", "super().__init__(data) self.load_layers(data['layers']) def load_layers(self, layers): self.layers = {} for layer", "name != 'default': RECIPES['default'] = RECIPES[data['name']] for recipe in Configs.layers:", "= os.path.normpath(recipe_configs).split('\\\\')[-1] if recipe_name[-4:] == '.yml': recipe_name = recipe_name[:-4] elif", "<filename>aiovectortiler/config_handler.py<gh_stars>1-10 import os import yaml import logging logger = logging.getLogger(__name__)", "have a YAML extension: {0}'.format(recipe_configs)) with open(recipe_configs) as r_c: load_recipe", "following model structures for recipes / layers / queries allows", "= recipe super().__init__(layer_data) self.load_queries(layer_data['queries']) def load_queries(self, queries): self.queries = []", "chain # for attributes. If not found in the root", "recipe_name[-4:] == '.yml': recipe_name = recipe_name[:-4] elif recipe_name[-5:] == '.yaml':", "for query in queries: self.queries.append(Query(self, query)) def __getattr__(self, attr): return", "{} for layer in layers: self.layers[layer['name']] = Layer(self, layer) def", "self.queries.append(Query(self, query)) def __getattr__(self, attr): return self.get(attr, getattr(self.recipe, attr)) @property", "raise ValueError('Recipe with name {} already exist'.format(name)) data['name'] = name", "None plugins = None @classmethod def init_server_configs(cls, server_configs): with open(server_configs)", "check the server configs. 
class Recipe(dict): def __init__(self, data): super().__init__(data)", "not have a YAML extension: {0}'.format(recipe_configs)) with open(recipe_configs) as r_c:", "{0}'.format(recipe_configs)) with open(recipe_configs) as r_c: load_recipe = yaml.load(r_c.read()) cls.recipes[recipe_name] =", "= yaml.load(s_c.read()) @classmethod def init_layer_recipes(cls, recipe_configs): recipe_name = None if", "layer in layers: self.layers[layer['name']] = Layer(self, layer) def __getattr__(self, attr):", "recipe, layer_data): self.recipe = recipe super().__init__(layer_data) self.load_queries(layer_data['queries']) def load_queries(self, queries):", "return self.get(attr, getattr(self.recipe, attr)) @property def id(self): return '{0}:{1}'.format(self.recipe.name, self.name)", "= logging.getLogger(__name__) class Configs: server = None recipes = {}", "plugins = None @classmethod def init_server_configs(cls, server_configs): with open(server_configs) as", "in recipe_configs: recipe_name = os.path.normpath(recipe_configs).split('/')[-1] # for windows elif '\\\\'", "recipe_name[-5:] == '.yaml': recipe_name = recipe_name[:-5] else: raise FileExistsError('File in", "recipe_name = recipe_name[:-4] elif recipe_name[-5:] == '.yaml': recipe_name = recipe_name[:-5]", "RECIPES['default'] = RECIPES[data['name']] for recipe in Configs.layers: with Path(recipe).open() as", "super().__init__(layer_data) self.load_queries(layer_data['queries']) def load_queries(self, queries): self.queries = [] for query", "yaml.load(r_c.read()) cls.recipes[recipe_name] = Recipe(load_recipe) # add the recipe name based", "then it will check the server configs. class Recipe(dict): def", "def init_server_configs(cls, server_configs): with open(server_configs) as s_c: cls.server = yaml.load(s_c.read())", "id(self): return '{0}:{1}'.format(self.recipe.name, self.name) @property def description(self): return self.get('description', 'no", "self.name) @property def description(self): return self.get('description', 'no description provided') class", "= recipe_name[:-5] else: raise FileExistsError('File in layer recipes folder does", "layer) def __getattr__(self, attr): return self.get(attr, Configs.server.get(attr, None)) class Layer(dict):", "len(RECIPES) == 1 and name != 'default': RECIPES['default'] = RECIPES[data['name']]", "found in the root recipes level then it will check", "name # this is needed by the tilejson query cls.recipes[recipe_name].name", "recipe_name = None if '/' in recipe_configs: recipe_name = os.path.normpath(recipe_configs).split('/')[-1]", "load_recipe = yaml.load(r_c.read()) cls.recipes[recipe_name] = Recipe(load_recipe) # add the recipe", "self.layers = {} for layer in layers: self.layers[layer['name']] = Layer(self,", "r_c: load_recipe = yaml.load(r_c.read()) cls.recipes[recipe_name] = Recipe(load_recipe) # add the", "exist'.format(name)) data['name'] = name RECIPES[name] = Recipe(data) if len(RECIPES) ==", "@property def id(self): return '{0}:{1}'.format(self.recipe.name, self.name) @property def description(self): return", "with open(server_configs) as s_c: cls.server = yaml.load(s_c.read()) @classmethod def init_layer_recipes(cls,", "data): super().__init__(data) self.load_layers(data['layers']) def load_layers(self, layers): self.layers = {} for", "layer_data): self.recipe = recipe super().__init__(layer_data) self.load_queries(layer_data['queries']) def load_queries(self, queries): self.queries", "provided') class Query(dict): def __init__(self, layer, data): self.layer = layer", "[] for query in queries: 
self.queries.append(Query(self, query)) def __getattr__(self, attr):", "recipes = {} DB = None plugins = None @classmethod", "Configs.layers: with Path(recipe).open() as f: load_recipe(yaml.load(f.read())) Plugins.hook('load', config=config, recipes=RECIPES) '''", "root recipes level then it will check the server configs.", "server = None recipes = {} DB = None plugins", "elif recipe_name[-5:] == '.yaml': recipe_name = recipe_name[:-5] else: raise FileExistsError('File", "model structures for recipes / layers / queries allows searching", "windows elif '\\\\' in recipe_configs: recipe_name = os.path.normpath(recipe_configs).split('\\\\')[-1] if recipe_name[-4:]", "recipes=RECIPES) ''' # the following model structures for recipes /", "in layers: self.layers[layer['name']] = Layer(self, layer) def __getattr__(self, attr): return", "for windows elif '\\\\' in recipe_configs: recipe_name = os.path.normpath(recipe_configs).split('\\\\')[-1] if", "load_recipe(yaml.load(f.read())) Plugins.hook('load', config=config, recipes=RECIPES) ''' # the following model structures", "if recipe_name[-4:] == '.yml': recipe_name = recipe_name[:-4] elif recipe_name[-5:] ==", "= recipe_name logger.info('Adding layer: {0}'.format(recipe_name)) ''' Plugins.load() Plugins.hook('before_load', config=Configs) def", "description(self): return self.get('description', 'no description provided') class Query(dict): def __init__(self,", "searching up the chain # for attributes. If not found", "config=Configs) def load_recipe(data): name = data.get('name', 'default') if name in", "the root recipes level then it will check the server", "= yaml.load(r_c.read()) cls.recipes[recipe_name] = Recipe(load_recipe) # add the recipe name", "def __init__(self, data): super().__init__(data) self.load_layers(data['layers']) def load_layers(self, layers): self.layers =", "query cls.recipes[recipe_name].name = recipe_name logger.info('Adding layer: {0}'.format(recipe_name)) ''' Plugins.load() Plugins.hook('before_load',", "yaml import logging logger = logging.getLogger(__name__) class Configs: server =", "# this is needed by the tilejson query cls.recipes[recipe_name].name =", "the recipe name based on the file name # this", "class Layer(dict): def __init__(self, recipe, layer_data): self.recipe = recipe super().__init__(layer_data)", "raise FileExistsError('File in layer recipes folder does not have a", "server configs. 
class Recipe(dict): def __init__(self, data): super().__init__(data) self.load_layers(data['layers']) def", "self.get(attr, Configs.server.get(attr, None)) class Layer(dict): def __init__(self, recipe, layer_data): self.recipe", "= None recipes = {} DB = None plugins =", "init_server_configs(cls, server_configs): with open(server_configs) as s_c: cls.server = yaml.load(s_c.read()) @classmethod", "recipe_name = os.path.normpath(recipe_configs).split('/')[-1] # for windows elif '\\\\' in recipe_configs:", "else: raise FileExistsError('File in layer recipes folder does not have", "Plugins.hook('load', config=config, recipes=RECIPES) ''' # the following model structures for", "= name RECIPES[name] = Recipe(data) if len(RECIPES) == 1 and", "= layer super().__init__(data) def __getattr__(self, attr): return self.get(attr, getattr(self.layer, attr))", "cls.server = yaml.load(s_c.read()) @classmethod def init_layer_recipes(cls, recipe_configs): recipe_name = None", "= None plugins = None @classmethod def init_server_configs(cls, server_configs): with", "= None if '/' in recipe_configs: recipe_name = os.path.normpath(recipe_configs).split('/')[-1] #", "def __getattr__(self, attr): return self.get(attr, getattr(self.recipe, attr)) @property def id(self):", "'.yaml': recipe_name = recipe_name[:-5] else: raise FileExistsError('File in layer recipes", "the server configs. class Recipe(dict): def __init__(self, data): super().__init__(data) self.load_layers(data['layers'])", "Query(dict): def __init__(self, layer, data): self.layer = layer super().__init__(data) def", "layer, data): self.layer = layer super().__init__(data) def __getattr__(self, attr): return", "{} DB = None plugins = None @classmethod def init_server_configs(cls,", "Recipe(load_recipe) # add the recipe name based on the file", "'default': RECIPES['default'] = RECIPES[data['name']] for recipe in Configs.layers: with Path(recipe).open()", "with name {} already exist'.format(name)) data['name'] = name RECIPES[name] =", "load_layers(self, layers): self.layers = {} for layer in layers: self.layers[layer['name']]", "queries): self.queries = [] for query in queries: self.queries.append(Query(self, query))", "__init__(self, layer, data): self.layer = layer super().__init__(data) def __getattr__(self, attr):", "recipe_name logger.info('Adding layer: {0}'.format(recipe_name)) ''' Plugins.load() Plugins.hook('before_load', config=Configs) def load_recipe(data):", "= None @classmethod def init_server_configs(cls, server_configs): with open(server_configs) as s_c:", "== '.yaml': recipe_name = recipe_name[:-5] else: raise FileExistsError('File in layer", "query)) def __getattr__(self, attr): return self.get(attr, getattr(self.recipe, attr)) @property def", "recipes folder does not have a YAML extension: {0}'.format(recipe_configs)) with", "'default') if name in RECIPES: raise ValueError('Recipe with name {}", "def __getattr__(self, attr): return self.get(attr, Configs.server.get(attr, None)) class Layer(dict): def", "name based on the file name # this is needed", "= Layer(self, layer) def __getattr__(self, attr): return self.get(attr, Configs.server.get(attr, None))", "Configs: server = None recipes = {} DB = None", "= {} for layer in layers: self.layers[layer['name']] = Layer(self, layer)", "{} already exist'.format(name)) data['name'] = name RECIPES[name] = Recipe(data) if", "import logging logger = logging.getLogger(__name__) class Configs: server = None", "name RECIPES[name] = Recipe(data) if len(RECIPES) == 1 and name", 
"'{0}:{1}'.format(self.recipe.name, self.name) @property def description(self): return self.get('description', 'no description provided')", "None recipes = {} DB = None plugins = None", "self.load_queries(layer_data['queries']) def load_queries(self, queries): self.queries = [] for query in", "import os import yaml import logging logger = logging.getLogger(__name__) class", "recipe_name = os.path.normpath(recipe_configs).split('\\\\')[-1] if recipe_name[-4:] == '.yml': recipe_name = recipe_name[:-4]", "os.path.normpath(recipe_configs).split('\\\\')[-1] if recipe_name[-4:] == '.yml': recipe_name = recipe_name[:-4] elif recipe_name[-5:]", "== 1 and name != 'default': RECIPES['default'] = RECIPES[data['name']] for", "def __init__(self, layer, data): self.layer = layer super().__init__(data) def __getattr__(self,", "1 and name != 'default': RECIPES['default'] = RECIPES[data['name']] for recipe", "RECIPES: raise ValueError('Recipe with name {} already exist'.format(name)) data['name'] =", "config=config, recipes=RECIPES) ''' # the following model structures for recipes", "if '/' in recipe_configs: recipe_name = os.path.normpath(recipe_configs).split('/')[-1] # for windows", "name {} already exist'.format(name)) data['name'] = name RECIPES[name] = Recipe(data)", "with Path(recipe).open() as f: load_recipe(yaml.load(f.read())) Plugins.hook('load', config=config, recipes=RECIPES) ''' #", "cls.recipes[recipe_name].name = recipe_name logger.info('Adding layer: {0}'.format(recipe_name)) ''' Plugins.load() Plugins.hook('before_load', config=Configs)", "file name # this is needed by the tilejson query", "attributes. If not found in the root recipes level then", "recipe_configs): recipe_name = None if '/' in recipe_configs: recipe_name =", "None if '/' in recipe_configs: recipe_name = os.path.normpath(recipe_configs).split('/')[-1] # for", "= recipe_name[:-4] elif recipe_name[-5:] == '.yaml': recipe_name = recipe_name[:-5] else:", "is needed by the tilejson query cls.recipes[recipe_name].name = recipe_name logger.info('Adding", "for layer in layers: self.layers[layer['name']] = Layer(self, layer) def __getattr__(self,", "recipe super().__init__(layer_data) self.load_queries(layer_data['queries']) def load_queries(self, queries): self.queries = [] for", "Recipe(data) if len(RECIPES) == 1 and name != 'default': RECIPES['default']", "as r_c: load_recipe = yaml.load(r_c.read()) cls.recipes[recipe_name] = Recipe(load_recipe) # add", "as s_c: cls.server = yaml.load(s_c.read()) @classmethod def init_layer_recipes(cls, recipe_configs): recipe_name", "class Configs: server = None recipes = {} DB =", "= RECIPES[data['name']] for recipe in Configs.layers: with Path(recipe).open() as f:", "description provided') class Query(dict): def __init__(self, layer, data): self.layer =", "return '{0}:{1}'.format(self.recipe.name, self.name) @property def description(self): return self.get('description', 'no description", "layer: {0}'.format(recipe_name)) ''' Plugins.load() Plugins.hook('before_load', config=Configs) def load_recipe(data): name =", "self.recipe = recipe super().__init__(layer_data) self.load_queries(layer_data['queries']) def load_queries(self, queries): self.queries =", "Path(recipe).open() as f: load_recipe(yaml.load(f.read())) Plugins.hook('load', config=config, recipes=RECIPES) ''' # the", "def init_layer_recipes(cls, recipe_configs): recipe_name = None if '/' in recipe_configs:", "@classmethod def init_server_configs(cls, server_configs): with open(server_configs) as s_c: cls.server =", "= 
import os
import yaml
import logging

logger = logging.getLogger(__name__)


class Configs:
    server = None
    recipes = {}
    DB = None
    plugins = None

    @classmethod
    def init_server_configs(cls, server_configs):
        with open(server_configs) as s_c:
            cls.server = yaml.load(s_c.read())

    @classmethod
    def init_layer_recipes(cls, recipe_configs):
        recipe_name = None
        if '/' in recipe_configs:
            recipe_name = os.path.normpath(recipe_configs).split('/')[-1]
        # for windows
        elif '\\' in recipe_configs:
            recipe_name = os.path.normpath(recipe_configs).split('\\')[-1]
        if recipe_name[-4:] == '.yml':
            recipe_name = recipe_name[:-4]
        elif recipe_name[-5:] == '.yaml':
            recipe_name = recipe_name[:-5]
        else:
            raise FileExistsError(
                'File in layer recipes folder does not have a YAML extension: {0}'.format(recipe_configs))
        with open(recipe_configs) as r_c:
            load_recipe = yaml.load(r_c.read())
        cls.recipes[recipe_name] = Recipe(load_recipe)
        # add the recipe name based on the file name
        # this is needed by the tilejson query
        cls.recipes[recipe_name].name = recipe_name
        logger.info('Adding layer: {0}'.format(recipe_name))


'''
Plugins.load()
Plugins.hook('before_load', config=Configs)

def load_recipe(data):
    name = data.get('name', 'default')
    if name in RECIPES:
        raise ValueError('Recipe with name {} already exist'.format(name))
    data['name'] = name
    RECIPES[name] = Recipe(data)
    if len(RECIPES) == 1 and name != 'default':
        RECIPES['default'] = RECIPES[data['name']]

for recipe in Configs.layers:
    with Path(recipe).open() as f:
        load_recipe(yaml.load(f.read()))

Plugins.hook('load', config=config, recipes=RECIPES)
'''


# The following model structures for recipes / layers / queries allow searching up
# the chain for attributes. If an attribute is not found at the recipe level, the
# lookup falls back to the server configs.
class Recipe(dict):
    def __init__(self, data):
        super().__init__(data)
        self.load_layers(data['layers'])

    def load_layers(self, layers):
        self.layers = {}
        for layer in layers:
            self.layers[layer['name']] = Layer(self, layer)

    def __getattr__(self, attr):
        return self.get(attr, Configs.server.get(attr, None))


class Layer(dict):
    def __init__(self, recipe, layer_data):
        self.recipe = recipe
        super().__init__(layer_data)
        self.load_queries(layer_data['queries'])

    def load_queries(self, queries):
        self.queries = []
        for query in queries:
            self.queries.append(Query(self, query))

    def __getattr__(self, attr):
        return self.get(attr, getattr(self.recipe, attr))

    @property
    def id(self):
        return '{0}:{1}'.format(self.recipe.name, self.name)

    @property
    def description(self):
        return self.get('description', 'no description provided')


class Query(dict):
    def __init__(self, layer, data):
        self.layer = layer
        super().__init__(data)

    def __getattr__(self, attr):
        # fall back to the parent layer (and from there to recipe / server configs)
        return self.get(attr, getattr(self.layer, attr))
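A minimal usage sketch of the loader above (illustrative only: the 'server.yml' path, the 'recipes/' folder and the 'demo' / 'mylayer' / 'srid' names are assumptions, not taken from this file):

    import os

    Configs.init_server_configs('server.yml')                      # hypothetical server config path
    for fname in os.listdir('recipes'):                            # hypothetical recipes folder
        Configs.init_layer_recipes(os.path.join('recipes', fname))

    recipe = Configs.recipes['demo']       # key is the YAML file name, e.g. recipes/demo.yml
    layer = recipe.layers['mylayer']       # hypothetical layer name from the YAML
    print(layer.id, layer.description)
    print(layer.queries[0].srid)           # 'srid' is a hypothetical key

Attribute access on Query, Layer and Recipe is a dict lookup first and then a walk up the chain, so a per-query setting overrides the layer setting, which overrides the recipe and server-wide defaults.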
[ "from .torch2onnx import torch2onnx from .onnx2trt import onnx2trt from .torch2trt", "import onnx2trt from .torch2trt import torch2trt from .base import load,", "import torch2onnx from .onnx2trt import onnx2trt from .torch2trt import torch2trt", "onnx2trt from .torch2trt import torch2trt from .base import load, save", "torch2onnx from .onnx2trt import onnx2trt from .torch2trt import torch2trt from", ".torch2onnx import torch2onnx from .onnx2trt import onnx2trt from .torch2trt import", "from .onnx2trt import onnx2trt from .torch2trt import torch2trt from .base", ".onnx2trt import onnx2trt from .torch2trt import torch2trt from .base import" ]
[ "= torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask =", "cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) ####################################################### # concept scores use jaccard similarity concept_verb_scores", "kwargs['noun_lens'] # (batch, num_nouns) noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(),", "outs = { 'vid_names': vid_names, 'cap_names': cap_names, 'scores': scores, }", "noun_masks) ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks, True) return sent_scores,", "'''Args: - vid_embeds: (batch, num_frames, embed_size) - vid_masks: (batch, num_frames)", "np.concatenate(all_scores[k][-1], axis=1) for k in range(K): all_scores[k] = np.concatenate(all_scores[k], axis=0)", "embed self.simattn_sigma = 4 self.hard_topk = 1 self.max_violation = True", "= im.unsqueeze(1).expand(-1, s_bs, -1) s = s.unsqueeze(0).expand(im_bs, -1, -1) intersection", "concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores def jaccard_sim(self, im, s): im_bs =", "= ground_sims.masked_fill(vid_pad_masks, 0) ############## else: vid_attn_per_word = ground_sims vid_attn_per_word[vid_attn_per_word <", "= torch.BoolTensor(batch_data['noun_masks']).to(self.device) node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n) rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device)", "vid_names, all_scores = [], [[] for _ in range(K)] cap_names", "of load and load_from_dict self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig()", "self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks) ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks,", "vid_name in vid_names: i2t_gts.append([]) for i, cap_name in enumerate(cap_names): if", "vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed) verb_embeds = kwargs['verb_embeds']", "torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum = torch.sum(verb_concept_mask, dim=1) n_mask_sum =", "concept_noun_loss.item())) return loss def evaluate_scores(self, tst_reader): K = self.config.subcfgs[VISENC].num_levels K", "} def forward_text_embed(self, batch_data): sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence sent_lens", "= torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device) node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device)", "{ 'vid_names': vid_names, 'cap_names': cap_names, 'scores': scores, } return metrics,", "noun_embeds: (batch, num_xxx, dim_embed) sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits =", "batch_vids, num_frames, _ = vid_embeds.size() vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3)", "concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs) scores = (sent_scores +", "scores use jaccard similarity concept_verb_scores = 
self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores =", "local_sent_embeds[1], noun_masks, True) return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores,", "numpy as np import torch import framework.ops import t2vretrieval.encoders.mlsent import", "dim_embed) vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens)", "fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2 metrics = self.calculate_metrics(fused_scores,", "__init__(self): super().__init__() self.num_verbs = 4 self.num_nouns = 6 self.attn_fusion =", "vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ############## else: vid_attn_per_word = ground_sims vid_attn_per_word[vid_attn_per_word", "nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1) nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss =", "sim, embed self.simattn_sigma = 4 self.hard_topk = 1 self.max_violation =", "step % self.config.monitor_iter == 0: neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10)", "(n_img, n_cap) all_scores = np.array(all_scores) # (k, n_img, n_cap) return", "+ concept_loss else: loss = self.config.loss_weights[0] * fusion_loss + \\", "noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum = torch.sum(verb_concept_mask, dim=1) n_mask_sum = torch.sum(noun_concept_mask,", "t2i_gts.setdefault(t_gt, []) t2i_gts[t_gt].append(i) idx = [0, 1, 2, 5, 6]", "vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device) # (batch, max_vis_len, dim_embed)", "vid_verb_embeds, 'vid_noun_embeds': vid_noun_embeds, 'local_vid_embeds': local_sent_embeds, 'vid_lens': vid_lens, 'max_len': max_len, 'logits':", "rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens = torch.sum(verb_masks, 2) noun_lens", "vid_lens, 'max_len': max_len, 'logits': logits, } def forward_text_embed(self, batch_data): sent_ids", "= torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1) nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0],", "scores = (sent_scores + verb_scores + noun_scores + local_verb_scores +", "step is not None and self.config.monitor_iter > 0 and step", "in enumerate(i2t_gts): for t_gt in t_gts: t2i_gts.setdefault(t_gt, []) t2i_gts[t_gt].append(i) idx", "K = K + 4 assert K == 7, 'Note", "k in range(K): all_scores[k].append([]) ijj = 0 for cap_data in", "phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \\ / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1)", "_ in range(K)] cap_names = tst_reader.dataset.captions for vid_data in tst_reader:", "idx = [0, 1, 2, 5, 6] fused_scores = (np.mean(scores[idx],", "vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) ################# vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2) if", "return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores def jaccard_sim(self,", "= [], [[] for _ in range(K)] cap_names = tst_reader.dataset.captions", "= self.criterion(noun_scores) eta = 0.1 mu = 0.01 
concept_verb_loss =", "verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks) ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks,", "self.config.num_verbs, inverse=False) # sum: (batch_vids, batch_sents) verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks,", "1).long(), self.config.num_nouns, inverse=False) # sum: (batch_vids, batch_sents) noun_scores = self.generate_phrase_scores(vid_noun_embeds,", "= 0 vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2) if mask_flag: vid_attn_per_word =", "scores!' vid_names, all_scores = [], [[] for _ in range(K)]", "##### shared ##### vid_lens = kwargs['vid_lens'] # (batch, ) num_frames", "for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size): cap_enc_outs = self.forward_text_embed(cap_data) cap_enc_outs.update(vid_enc_outs) indv_scores =", "0))/2 metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts) if return_outs: outs =", "= True self.loss_weights = None ## this config will be", "concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs) scores = (sent_scores + verb_scores", "noun_loss + \\ vbce_loss + nbce_loss if step is not", "dim=1) vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1) vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum) nbce_loss", "= torch.sum(noun_masks, 2) # sent_embeds: (batch, dim_embed) # verb_embeds, noun_embeds:", "inverse=False) # batch*max_len ##### sentence-level scores ##### sent_scores = cosine_sim(kwargs['vid_sent_embeds'],", "self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks) ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks,", "+ local_noun_scores) / 5 scores2 = (concept_verb_scores + concept_noun_scores) /", "%.4f'%(step, vbce_loss.item(), nbce_loss.item())) self.print_fn('\\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(),", "verb_scores + noun_scores + local_verb_scores + local_noun_scores) / 5 scores2", "'verb_embeds': verb_embeds, 'verb_lens': verb_lens, 'noun_embeds': noun_embeds, 'noun_lens': noun_lens, 'sent_logits': sent_logits,", "verb-level scores ##### vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed)", "0 for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size): cap_enc_outs = self.forward_text_embed(cap_data) cap_enc_outs.update(vid_enc_outs) indv_scores", "# (batch, num_verbs) local_vid_embeds =kwargs['local_vid_embeds'] local_sent_embeds = kwargs['local_sent_embeds'] verb_masks =", "= s.size(0) im = im.unsqueeze(1).expand(-1, s_bs, -1) s = s.unsqueeze(0).expand(im_bs,", "t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]), TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]) } def forward_video_embed(self, batch_data): vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device)", "phrase_2d_embeds = phrase_embeds.view(-1, dim_embed) # size = (batch_vids, batch_phrases, num_frames,", "framework.ops.l2norm(vid_attn_per_word, dim=2) if mask_flag: vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) ################# vid_attn_per_word", "return { 'vid_sent_embeds': vid_sent_embeds, 'vid_verb_embeds': vid_verb_embeds, 'vid_noun_embeds': vid_noun_embeds, 'local_vid_embeds': local_sent_embeds,", "sent_lens, 'verb_embeds': 
verb_embeds, 'verb_lens': verb_lens, 'noun_embeds': noun_embeds, 'noun_lens': noun_lens, 'sent_logits':", "dim_embed) noun_lens = kwargs['noun_lens'] # (batch, num_nouns) noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens", "load and load_from_dict self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class", "0: neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10) self.print_fn('\\tstep %d: pos mean", "vid_masks, phrase_embeds, phrase_masks, mask_flag=False): '''Args: - vid_embeds: (batch, num_frames, embed_size)", "batch_sents) verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks) ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0],", "if mask_flag: vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ############## else: vid_attn_per_word =", "loss = fusion_loss + 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss else:", "kwargs['noun_embeds'] # (batch, num_nouns, dim_embed) noun_lens = kwargs['noun_lens'] # (batch,", "vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1) vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum) nbce_loss =", "= 4 self.hard_topk = 1 self.max_violation = True self.loss_weights =", "local_vid_embeds =kwargs['local_vid_embeds'] local_sent_embeds = kwargs['local_sent_embeds'] verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0,", "torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds) word_attn_sims = torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds)) elif self.config.attn_fusion", "def forward_text_embed(self, batch_data): sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence sent_lens =", "n_mask_sum = torch.sum(noun_concept_mask, dim=1) vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1) vbce_loss", "import torch import framework.ops import t2vretrieval.encoders.mlsent import t2vretrieval.encoders.mlvideo import t2vretrieval.models.globalmatch", "+ local_verb_scores + local_noun_scores) / 5 scores2 = (concept_verb_scores +", "vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2) if self.config.attn_fusion == 'embed':", "dim=2) if mask_flag: vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) ################# vid_attn_per_word =", "neg mean scores i2t %.2f, t2i %.2f'%( step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores,", "= self.criterion(sent_scores) verb_loss = self.criterion(verb_scores) noun_loss = self.criterion(noun_scores) eta =", "(vid_masks == 0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases, dim_embed = phrase_embeds.size() # compute", "# (batch, num_frames, dim_embed) verb_embeds = kwargs['verb_embeds'] # (batch, num_verbs,", "verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(), self.config.num_verbs, inverse=False) # sum:", "self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0: neg_scores", "= mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1) vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum)", "} def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, 
phrase_masks, mask_flag=False): '''Args: -", "sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len", "num_verbs, dim_embed) verb_lens = kwargs['verb_lens'] # (batch, num_verbs) local_vid_embeds =kwargs['local_vid_embeds']", "embed_size) - phrase_masks: (batch, num_phrases) ''' batch_vids, num_frames, _ =", "torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device) node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ##", "\\ self.config.loss_weights[2] * verb_loss + \\ self.config.loss_weights[3] * noun_loss +", "(concept_verb_scores + concept_noun_scores) / 2 sent_loss = self.criterion(sent_scores) verb_loss =", "= torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum =", "%d: sim_loss %.4f, vsim_loss %.4f, nsim_loss %.4f'%(step, concept_loss.item(), concept_verb_loss.item(), concept_noun_loss.item()))", "= fusion_loss + 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss else: loss", "phrase_scores def generate_scores(self, **kwargs): ##### shared ##### vid_lens = kwargs['vid_lens']", "from t2vretrieval.models.criterion import cosine_sim from t2vretrieval.models.globalmatch import VISENC, TXTENC class", "# (batch, ) num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks = framework.ops.sequence_mask(vid_lens, num_frames,", "noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks) ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1], vid_masks,", "eta = 0.1 mu = 0.01 concept_verb_loss = 0.5*self.criterion(concept_verb_scores) concept_noun_loss", "v_mask_sum = torch.sum(verb_concept_mask, dim=1) n_mask_sum = torch.sum(noun_concept_mask, dim=1) vbce_loss =", "if step is not None and self.config.monitor_iter > 0 and", "vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2) if mask_flag: vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18)", "'logits': logits, } def forward_text_embed(self, batch_data): sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ##", "kwargs['verb_embeds'] # (batch, num_verbs, dim_embed) verb_lens = kwargs['verb_lens'] # (batch,", "%d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%( step,", "batch_phrases, num_frames, num_phrases) ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids, num_frames, batch_phrases,", "noun_concept_label), dim=1) nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1)", "torch.sum(verb_concept_mask, dim=1) n_mask_sum = torch.sum(noun_concept_mask, dim=1) vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label),", "mu*torch.mean(vbce_sent_loss/v_mask_sum) nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1) nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss", "* fusion_loss + \\ self.config.loss_weights[1] * sent_loss + \\ self.config.loss_weights[2]", "in 
tst_reader: vid_names.extend(vid_data['names']) vid_enc_outs = self.forward_video_embed(vid_data) for k in range(K):", "= self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1]) ####################################################### ##### verb-level", "{ VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]), TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]) } def forward_video_embed(self, batch_data): vid_fts", "if self.config.attn_fusion == 'embed': vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds) word_attn_sims", "'noun_lens': noun_lens, 'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds, } def generate_phrase_scores(self, vid_embeds,", "loss def evaluate_scores(self, tst_reader): K = self.config.subcfgs[VISENC].num_levels K = K", "=kwargs['local_vid_embeds'] local_sent_embeds = kwargs['local_sent_embeds'] verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(),", "vid_attn_per_word[vid_attn_per_word < 0] = 0 vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2) if", "+ \\ self.config.loss_weights[2] * verb_loss + \\ self.config.loss_weights[3] * noun_loss", "= torch.max(im, s).sum(-1) score = intersection / union return score", "concept_noun_loss = 0.5*self.criterion(concept_noun_scores) concept_loss = eta*self.criterion(scores2) verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label", "all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1) for k in range(K): all_scores[k] =", "* vid_attn_per_word, dim=2) if self.config.attn_fusion == 'embed': vid_attned_embeds = torch.einsum('abcd,ace->abde',", "### if mask_flag: vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ############## else: vid_attn_per_word", "= tst_reader.dataset.captions for vid_data in tst_reader: vid_names.extend(vid_data['names']) vid_enc_outs = self.forward_video_embed(vid_data)", "all_scores[k].append([]) ijj = 0 for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size): cap_enc_outs =", "= (vid_masks == 0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases, dim_embed = phrase_embeds.size() #", "= torch.LongTensor(batch_data['attn_lens']).to(self.device) # (batch, max_vis_len, dim_embed) vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds,", "= torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum = torch.sum(verb_concept_mask, dim=1) n_mask_sum = torch.sum(noun_concept_mask, dim=1)", "# compute component-wise similarity vid_2d_embeds = vid_embeds.view(-1, dim_embed) phrase_2d_embeds =", "verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores def jaccard_sim(self, im, s):", "self.config.num_nouns, inverse=False) # sum: (batch_vids, batch_sents) noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks,", "torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return phrase_scores def generate_scores(self, **kwargs): ##### shared #####", "similarity concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1]) #######################################################", "all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img, n_cap) all_scores = np.array(all_scores)", 
"'vid_names': vid_names, 'cap_names': cap_names, 'scores': scores, } return metrics, outs", "'sent_lens': sent_lens, 'verb_embeds': verb_embeds, 'verb_lens': verb_lens, 'noun_embeds': noun_embeds, 'noun_lens': noun_lens,", "import VISENC, TXTENC class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig): def __init__(self): super().__init__() self.num_verbs =", "self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def build_submods(self):", "vid_masks, verb_embeds, verb_masks) ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True)", "tst_reader: vid_names.extend(vid_data['names']) vid_enc_outs = self.forward_video_embed(vid_data) for k in range(K): all_scores[k].append([])", "'sim': # (batch_vids, batch_phrases, num_phrases) word_attn_sims = torch.sum(ground_sims * vid_attn_per_word,", "= self.generate_scores(**cap_enc_outs) for k in range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj += 0", "'max_len': max_len, 'logits': logits, } def forward_text_embed(self, batch_data): sent_ids =", "num_verbs) local_vid_embeds =kwargs['local_vid_embeds'] local_sent_embeds = kwargs['local_sent_embeds'] verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens >", "torch.max(im, s).sum(-1) score = intersection / union return score def", "noun_lens = torch.sum(noun_masks, 2) # sent_embeds: (batch, dim_embed) # verb_embeds,", "# sum: (batch_vids, batch_sents) noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks)", "verb_embeds = kwargs['verb_embeds'] # (batch, num_verbs, dim_embed) verb_lens = kwargs['verb_lens']", "t_gt in t_gts: t2i_gts.setdefault(t_gt, []) t2i_gts[t_gt].append(i) idx = [0, 1,", "num_phrases, embed_size) - phrase_masks: (batch, num_phrases) ''' batch_vids, num_frames, _", "self.print_fn('\\tstep %d: pos mean scores %.2f, hard neg mean scores", "np import torch import framework.ops import t2vretrieval.encoders.mlsent import t2vretrieval.encoders.mlvideo import", "k in range(K): all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1) for k in", "1, 2, 5, 6] fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5],", "self.config.attn_fusion == 'embed': vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds) word_attn_sims =", "= torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens = torch.sum(verb_masks, 2) noun_lens =", "sent_embeds, 'sent_lens': sent_lens, 'verb_embeds': verb_embeds, 'verb_lens': verb_lens, 'noun_embeds': noun_embeds, 'noun_lens':", "enumerate(cap_names): if cap_name in tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts = {} for", "self.config.loss_weights[1] * sent_loss + \\ self.config.loss_weights[2] * verb_loss + \\", "%.4f'%(step, concept_loss.item(), concept_verb_loss.item(), concept_noun_loss.item())) return loss def evaluate_scores(self, tst_reader): K", "torch.BoolTensor(batch_data['noun_masks']).to(self.device) node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n) rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ##", "= vid_embeds.view(-1, dim_embed) phrase_2d_embeds = phrase_embeds.view(-1, dim_embed) # size =", "self.generate_scores(**cap_enc_outs) for k in 
range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj += 0 for", "== 0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases, dim_embed = phrase_embeds.size() # compute component-wise", "VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]), TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]) } def forward_video_embed(self, batch_data): vid_fts =", "##### verb-level scores ##### vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames,", "num_frames, num_phrases) ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids, num_frames, batch_phrases, num_phrases).transpose(1,", "in enumerate(cap_names): if cap_name in tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts = {}", "verb_embeds, 'verb_lens': verb_lens, 'noun_embeds': noun_embeds, 'noun_lens': noun_lens, 'sent_logits': sent_logits, 'local_sent_embeds':", "is not None and self.config.monitor_iter > 0 and step %", "%.4f, fusion_loss %.4f'%( step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item())) self.print_fn('\\tstep %d:", "/ 2 sent_loss = self.criterion(sent_scores) verb_loss = self.criterion(verb_scores) noun_loss =", "axis=1) for k in range(K): all_scores[k] = np.concatenate(all_scores[k], axis=0) #", "num_nouns) noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(), self.config.num_nouns, inverse=False) #", "dim=2) if self.config.attn_fusion == 'embed': vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds)", "ground_sims vid_attn_per_word[vid_attn_per_word < 0] = 0 vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2)", "vid_data in tst_reader: vid_names.extend(vid_data['names']) vid_enc_outs = self.forward_video_embed(vid_data) for k in", "0.5*self.criterion(concept_verb_scores) concept_noun_loss = 0.5*self.criterion(concept_noun_scores) concept_loss = eta*self.criterion(scores2) verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device)", "batch_phrases, num_phrases) word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2) # sum:", "im = im.unsqueeze(1).expand(-1, s_bs, -1) s = s.unsqueeze(0).expand(im_bs, -1, -1)", "mask_flag: vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) ################# vid_attn_per_word = torch.softmax(self.config.simattn_sigma *", "%.2f, hard neg mean scores i2t %.2f, t2i %.2f'%( step,", "k in range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj += 0 for k in", "all_scores = np.array(all_scores) # (k, n_img, n_cap) return vid_names, cap_names,", "dim=1) vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum) nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1) nbce_sent_loss", "K == 7, 'Note that this error indicates losing other", "%.4f, noun_loss %.4f, fusion_loss %.4f'%( step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item()))", "sent_loss + \\ self.config.loss_weights[2] * verb_loss + \\ self.config.loss_weights[3] *", "union return score def forward_loss(self, batch_data, step=None): enc_outs = self.forward_video_embed(batch_data)", "verb_masks, True) ##### noun-level scores ##### vid_noun_embeds = kwargs['vid_noun_embeds'] #", "# batch*max_len ##### sentence-level scores ##### sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds'])", "> 0, 1).long(), 
self.config.num_verbs, inverse=False) # sum: (batch_vids, batch_sents) verb_scores", "score = intersection / union return score def forward_loss(self, batch_data,", "self.generate_phrase_scores(local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks, True) return sent_scores, verb_scores, noun_scores, concept_verb_scores,", "num_frames, inverse=False) # batch*max_len ##### sentence-level scores ##### sent_scores =", "self.config.loss_weights[3] * noun_loss + \\ vbce_loss + nbce_loss if step", "= K + 4 assert K == 7, 'Note that", "def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False): '''Args: - vid_embeds:", "= torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds) word_attn_sims = torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds)) elif", "kwargs['sent_logits'][1]) ####################################################### ##### verb-level scores ##### vid_verb_embeds = kwargs['vid_verb_embeds'] #", "batch*(n_v+n_n) rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens = torch.sum(verb_masks, 2)", "noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum", "for i, t_gts in enumerate(i2t_gts): for t_gt in t_gts: t2i_gts.setdefault(t_gt,", "nbce_loss.item())) self.print_fn('\\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item())) self.print_fn('\\tstep", "cap_enc_outs.update(vid_enc_outs) indv_scores = self.generate_scores(**cap_enc_outs) for k in range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj", "self.config.loss_weights is None: loss = fusion_loss + 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss)", "return vid_names, cap_names, all_scores def evaluate(self, tst_reader, return_outs=False): vid_names, cap_names,", "return loss def evaluate_scores(self, tst_reader): K = self.config.subcfgs[VISENC].num_levels K =", "sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges) return { 'sent_embeds': sent_embeds,", "cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2) ### if mask_flag:", "framework.ops.l2norm(phrase_embeds)) elif self.config.attn_fusion == 'sim': # (batch_vids, batch_phrases, num_phrases) word_attn_sims", "+ \\ self.config.loss_weights[3] * noun_loss + \\ vbce_loss + nbce_loss", "torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n) rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens =", "= self.calculate_metrics(fused_scores, i2t_gts, t2i_gts) if return_outs: outs = { 'vid_names':", "generate_scores(self, **kwargs): ##### shared ##### vid_lens = kwargs['vid_lens'] # (batch,", "= torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1) nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss = self.criterion(scores)", "= vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) ################# vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2)", "concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1]) 
####################################################### ##### verb-level scores ##### vid_verb_embeds", "all_scores def evaluate(self, tst_reader, return_outs=False): vid_names, cap_names, scores = self.evaluate_scores(tst_reader)", "(batch, max_vis_len, dim_embed) vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len =", "vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases, dim_embed = phrase_embeds.size()", "+ 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss else: loss = self.config.loss_weights[0]", "class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig): def __init__(self): super().__init__() self.num_verbs = 4 self.num_nouns =", "logits, } def forward_text_embed(self, batch_data): sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence", "= (batch_vids, batch_phrases, num_frames, num_phrases) ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids,", "int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False) # batch*max_len ##### sentence-level", "local_sent_embeds = kwargs['local_sent_embeds'] verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(), self.config.num_verbs,", "num_frames, dim_embed) noun_embeds = kwargs['noun_embeds'] # (batch, num_nouns, dim_embed) noun_lens", "-1e10) self.print_fn('\\tstep %d: pos mean scores %.2f, hard neg mean", "noun_loss %.4f, fusion_loss %.4f'%( step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item())) self.print_fn('\\tstep", "= mu*torch.mean(vbce_loss/v_mask_sum) nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1) nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum)", "forward_video_embed(self, batch_data): vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device) # (batch,", "cap_enc_outs = self.forward_text_embed(cap_data) cap_enc_outs.update(vid_enc_outs) indv_scores = self.generate_scores(**cap_enc_outs) for k in", "+ concept_noun_scores) / 2 sent_loss = self.criterion(sent_scores) verb_loss = self.criterion(verb_scores)", "0.1 mu = 0.01 concept_verb_loss = 0.5*self.criterion(concept_verb_scores) concept_noun_loss = 0.5*self.criterion(concept_noun_scores)", "t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def build_submods(self): return {", "verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%( step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(),", "cap_names, scores = self.evaluate_scores(tst_reader) i2t_gts = [] for vid_name in", "= torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum = torch.sum(verb_concept_mask, dim=1) n_mask_sum", "= torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1) vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum) nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1],", "self.print_fn('\\tstep %d: sim_loss %.4f, vsim_loss %.4f, nsim_loss %.4f'%(step, concept_loss.item(), 
concept_verb_loss.item(),", "enumerate(i2t_gts): for t_gt in t_gts: t2i_gts.setdefault(t_gt, []) t2i_gts[t_gt].append(i) idx =", "local_verb_scores + local_noun_scores) / 5 scores2 = (concept_verb_scores + concept_noun_scores)", "scores ##### sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) ####################################################### # concept scores", "num_phrases, dim_embed = phrase_embeds.size() # compute component-wise similarity vid_2d_embeds =", "enc_outs = self.forward_video_embed(batch_data) cap_enc_outs = self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores, verb_scores, noun_scores,", "and self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0:", "vid_masks, local_sent_embeds[0], verb_masks, True) ##### noun-level scores ##### vid_noun_embeds =", "in tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts = {} for i, t_gts in", "batch_phrases, num_phrases, dim_embed = phrase_embeds.size() # compute component-wise similarity vid_2d_embeds", "import t2vretrieval.encoders.mlsent import t2vretrieval.encoders.mlvideo import t2vretrieval.models.globalmatch from t2vretrieval.models.criterion import cosine_sim", "phrase_embeds.size() # compute component-wise similarity vid_2d_embeds = vid_embeds.view(-1, dim_embed) phrase_2d_embeds", "vid_attn_per_word, dim=2) if self.config.attn_fusion == 'embed': vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word,", "# sum: (batch_vid, batch_phrases) phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2)", "verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum = torch.sum(verb_concept_mask, dim=1)", "framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(), self.config.num_verbs, inverse=False) # sum: (batch_vids, batch_sents)", "self.evaluate_scores(tst_reader) i2t_gts = [] for vid_name in vid_names: i2t_gts.append([]) for", "tst_reader): K = self.config.subcfgs[VISENC].num_levels K = K + 4 assert", "torch.mean(torch.max(neg_scores, 0)[0]))) self.print_fn('\\tstep %d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f,", "vid_attn_per_word, vid_embeds) word_attn_sims = torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds)) elif self.config.attn_fusion ==", "torch.sum(ground_sims * vid_attn_per_word, dim=2) # sum: (batch_vid, batch_phrases) phrase_scores =", "sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores def jaccard_sim(self, im,", "torch.LongTensor(batch_data['attn_lens']).to(self.device) # (batch, max_vis_len, dim_embed) vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits,", "sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length", "vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1) vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum) nbce_sent_loss =", "sum: (batch_vids, batch_sents) verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks) ind_verb_scores", "verb_lens = torch.sum(verb_masks, 2) noun_lens = torch.sum(noun_masks, 2) # sent_embeds:", "local_noun_scores) / 5 scores2 = (concept_verb_scores + concept_noun_scores) / 
2", "noun_embeds = kwargs['noun_embeds'] # (batch, num_nouns, dim_embed) noun_lens = kwargs['noun_lens']", "self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def build_submods(self): return { VISENC:", "%.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item())) self.print_fn('\\tstep %d: sim_loss %.4f, vsim_loss", "t2i_gts) if return_outs: outs = { 'vid_names': vid_names, 'cap_names': cap_names,", "mean scores %.2f, hard neg mean scores i2t %.2f, t2i", "rel_edges) return { 'sent_embeds': sent_embeds, 'sent_lens': sent_lens, 'verb_embeds': verb_embeds, 'verb_lens':", "batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2) ### if mask_flag: vid_attn_per_word =", "= kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed) noun_embeds = kwargs['noun_embeds'] #", "self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1]) ####################################################### ##### verb-level scores ##### vid_verb_embeds = kwargs['vid_verb_embeds']", "= self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks) ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0],", "will be covered by model.json due to the functions of", "sum: (batch_vids, batch_sents) noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks) ind_noun_scores", "forward_loss(self, batch_data, step=None): enc_outs = self.forward_video_embed(batch_data) cap_enc_outs = self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs)", "sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) ####################################################### # concept scores use jaccard", "torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device) # (batch, max_vis_len, dim_embed) vid_sent_embeds, vid_verb_embeds,", "self.generate_scores(**enc_outs) scores = (sent_scores + verb_scores + noun_scores + local_verb_scores", "(batch_vids, batch_sents) noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks) ind_noun_scores =", "batch_data): sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ##", "= 0.1 mu = 0.01 concept_verb_loss = 0.5*self.criterion(concept_verb_scores) concept_noun_loss =", "batch_phrases, num_phrases).transpose(1, 2) ### if mask_flag: vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0)", "nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss = self.criterion(scores) if self.config.loss_weights is None:", "## batch*(n_v+n_n) rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens = torch.sum(verb_masks,", "sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs) scores", "return phrase_scores def generate_scores(self, **kwargs): ##### shared ##### vid_lens =", "# (batch_vids, batch_phrases, num_phrases) word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2)", "vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum) nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1) nbce_loss =", "= kwargs['verb_embeds'] # (batch, num_verbs, 
dim_embed) verb_lens = kwargs['verb_lens'] #", "batch_phrases) phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \\ / torch.sum(phrase_masks,", "verb_embeds, verb_masks) ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True) #####", "local_sent_embeds[0], verb_masks, True) ##### noun-level scores ##### vid_noun_embeds = kwargs['vid_noun_embeds']", "scores i2t %.2f, t2i %.2f'%( step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]), torch.mean(torch.max(neg_scores,", "= t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def build_submods(self): return", "(batch, num_phrases) ''' batch_vids, num_frames, _ = vid_embeds.size() vid_pad_masks =", "t2i %.2f'%( step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]), torch.mean(torch.max(neg_scores, 0)[0]))) self.print_fn('\\tstep %d:", "noun_masks, True) return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores", "2, 5, 6] fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2", "= self.config.loss_weights[0] * fusion_loss + \\ self.config.loss_weights[1] * sent_loss +", "# sum: (batch_vids, batch_sents) verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks)", "range(K): all_scores[k].append([]) ijj = 0 for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size): cap_enc_outs", "1).float().unsqueeze(0).clamp(min=1) return phrase_scores def generate_scores(self, **kwargs): ##### shared ##### vid_lens", "1*(vbce_sent_loss+nbce_sent_loss) + concept_loss else: loss = self.config.loss_weights[0] * fusion_loss +", "= self.criterion(scores) if self.config.loss_weights is None: loss = fusion_loss +", "scores ##### vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed) verb_embeds", "self.num_verbs = 4 self.num_nouns = 6 self.attn_fusion = 'embed' #", "sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%( step, sent_loss.data.item(),", "torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens = torch.sum(verb_masks, 2) noun_lens = torch.sum(noun_masks,", "framework.ops import t2vretrieval.encoders.mlsent import t2vretrieval.encoders.mlvideo import t2vretrieval.models.globalmatch from t2vretrieval.models.criterion import", "torch.sum(noun_masks, 2) # sent_embeds: (batch, dim_embed) # verb_embeds, noun_embeds: (batch,", "0)[0]))) self.print_fn('\\tstep %d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss", "self.loss_weights = None ## this config will be covered by", "for k in range(K): all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1) for k", "mu*torch.mean(vbce_loss/v_mask_sum) nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1) nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss", "= torch.sum(verb_concept_mask, dim=1) n_mask_sum = torch.sum(noun_concept_mask, dim=1) vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0],", "range(K): all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1) for k in range(K): all_scores[k]", "1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss else: loss = 
self.config.loss_weights[0] *", "K + 4 assert K == 7, 'Note that this", "cap_names = tst_reader.dataset.captions for vid_data in tst_reader: vid_names.extend(vid_data['names']) vid_enc_outs =", "####################################################### ##### verb-level scores ##### vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch,", "dtype=torch.bool).to(self.device), -1e10) self.print_fn('\\tstep %d: pos mean scores %.2f, hard neg", "functions of load and load_from_dict self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] =", "concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1]) ####################################################### #####", "2) ### if mask_flag: vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ############## else:", "kwargs['vid_lens'] # (batch, ) num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks = framework.ops.sequence_mask(vid_lens,", "dim=2) # sum: (batch_vid, batch_phrases) phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0),", "vid_noun_embeds, 'local_vid_embeds': local_sent_embeds, 'vid_lens': vid_lens, 'max_len': max_len, 'logits': logits, }", "im_bs = im.size(0) s_bs = s.size(0) im = im.unsqueeze(1).expand(-1, s_bs,", "torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2) if self.config.attn_fusion == 'embed': vid_attned_embeds =", "self.config.loss_weights[2] * verb_loss + \\ self.config.loss_weights[3] * noun_loss + \\", "local_sent_embeds, 'vid_lens': vid_lens, 'max_len': max_len, 'logits': logits, } def forward_text_embed(self,", "intersection = torch.min(im, s).sum(-1) union = torch.max(im, s).sum(-1) score =", "num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False) # batch*max_len", "''' batch_vids, num_frames, _ = vid_embeds.size() vid_pad_masks = (vid_masks ==", "num_phrases) ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2)", "vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum) nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1) nbce_sent_loss =", "phrase_masks, mask_flag=False): '''Args: - vid_embeds: (batch, num_frames, embed_size) - vid_masks:", "t2vretrieval.models.criterion import cosine_sim from t2vretrieval.models.globalmatch import VISENC, TXTENC class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig):", "vid_embeds.size() vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases, dim_embed =", "verb_concept_label), dim=1) vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum) nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1)", "True) return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores def", "nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1) nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss =", "nbce_loss if step is not None and self.config.monitor_iter > 0", "'Note that this error indicates losing other scores!' 
vid_names, all_scores", "self.criterion(noun_scores) eta = 0.1 mu = 0.01 concept_verb_loss = 0.5*self.criterion(concept_verb_scores)", "n_img, n_cap) return vid_names, cap_names, all_scores def evaluate(self, tst_reader, return_outs=False):", "be covered by model.json due to the functions of load", "0 vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2) if mask_flag: vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks,", "= torch.min(im, s).sum(-1) union = torch.max(im, s).sum(-1) score = intersection", "cap_enc_outs = self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores,", "def jaccard_sim(self, im, s): im_bs = im.size(0) s_bs = s.size(0)", "fusion_loss = self.criterion(scores) if self.config.loss_weights is None: loss = fusion_loss", "# (k, n_img, n_cap) return vid_names, cap_names, all_scores def evaluate(self,", "sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item())) self.print_fn('\\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step,", "sentence-level scores ##### sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) ####################################################### # concept", "dim_embed) verb_lens = kwargs['verb_lens'] # (batch, num_verbs) local_vid_embeds =kwargs['local_vid_embeds'] local_sent_embeds", "in range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj += 0 for k in range(K):", "self.submods[VISENC](vid_fts, vid_lens) return { 'vid_sent_embeds': vid_sent_embeds, 'vid_verb_embeds': vid_verb_embeds, 'vid_noun_embeds': vid_noun_embeds,", "## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens = torch.sum(verb_masks, 2) noun_lens = torch.sum(noun_masks, 2)", "vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item())) self.print_fn('\\tstep %d: vbce_sent_loss %.4f,", "fusion_loss + 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss else: loss =", "'noun_embeds': noun_embeds, 'noun_lens': noun_lens, 'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds, } def", "TXTENC class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig): def __init__(self): super().__init__() self.num_verbs = 4 self.num_nouns", "self.criterion(scores) if self.config.loss_weights is None: loss = fusion_loss + 1*(vbce_loss+nbce_loss)", "# (n_img, n_cap) all_scores = np.array(all_scores) # (k, n_img, n_cap)", "return { 'sent_embeds': sent_embeds, 'sent_lens': sent_lens, 'verb_embeds': verb_embeds, 'verb_lens': verb_lens,", "s = s.unsqueeze(0).expand(im_bs, -1, -1) intersection = torch.min(im, s).sum(-1) union", "concept_verb_loss = 0.5*self.criterion(concept_verb_scores) concept_noun_loss = 0.5*self.criterion(concept_noun_scores) concept_loss = eta*self.criterion(scores2) verb_concept_label", "node_roles, rel_edges) return { 'sent_embeds': sent_embeds, 'sent_lens': sent_lens, 'verb_embeds': verb_embeds,", "0 and step % self.config.monitor_iter == 0: neg_scores = scores.masked_fill(torch.eye(len(scores),", "[] for vid_name in vid_names: i2t_gts.append([]) for i, cap_name in", "i, cap_name in enumerate(cap_names): if cap_name in tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts", "torch import framework.ops import t2vretrieval.encoders.mlsent import t2vretrieval.encoders.mlvideo import t2vretrieval.models.globalmatch from", "kwargs['local_sent_embeds'] verb_masks = 
framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(), self.config.num_verbs, inverse=False) #", "kwargs['sent_logits'][0]) concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1]) ####################################################### ##### verb-level scores #####", "TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]) } def forward_video_embed(self, batch_data): vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens", "## this config will be covered by model.json due to", "+ noun_scores + local_verb_scores + local_noun_scores) / 5 scores2 =", "##### sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) ####################################################### # concept scores use", "torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]), torch.mean(torch.max(neg_scores, 0)[0]))) self.print_fn('\\tstep %d: sent_loss %.4f, verb_loss", "vid_embeds.view(-1, dim_embed) phrase_2d_embeds = phrase_embeds.view(-1, dim_embed) # size = (batch_vids,", "def generate_scores(self, **kwargs): ##### shared ##### vid_lens = kwargs['vid_lens'] #", "vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) ################# vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word,", "%.2f, t2i %.2f'%( step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]), torch.mean(torch.max(neg_scores, 0)[0]))) self.print_fn('\\tstep", "noun_loss.data.item(), fusion_loss.data.item())) self.print_fn('\\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item()))", "this config will be covered by model.json due to the", "vid_embeds) word_attn_sims = torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds)) elif self.config.attn_fusion == 'sim':", "= {} for i, t_gts in enumerate(i2t_gts): for t_gt in", "in range(K)] cap_names = tst_reader.dataset.captions for vid_data in tst_reader: vid_names.extend(vid_data['names'])", "vid_masks: (batch, num_frames) - phrase_embeds: (batch, num_phrases, embed_size) - phrase_masks:", "kwargs['sent_embeds']) ####################################################### # concept scores use jaccard similarity concept_verb_scores =", "pos mean scores %.2f, hard neg mean scores i2t %.2f,", "= self.config.subcfgs[VISENC].num_levels K = K + 4 assert K ==", "torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len noun_masks =", "from t2vretrieval.models.globalmatch import VISENC, TXTENC class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig): def __init__(self): super().__init__()", "local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs) scores = (sent_scores + verb_scores +", "kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed) noun_embeds = kwargs['noun_embeds'] # (batch,", "n_cap) return vid_names, cap_names, all_scores def evaluate(self, tst_reader, return_outs=False): vid_names,", "i2t_gts.append([]) for i, cap_name in enumerate(cap_names): if cap_name in tst_reader.dataset.ref_captions[vid_name]:", "sent_logits, 'local_sent_embeds': local_sent_embeds, } def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks,", "vid_masks, local_sent_embeds[1], noun_masks, True) return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores,", 
"in range(K): all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img, n_cap) all_scores", "def forward_loss(self, batch_data, step=None): enc_outs = self.forward_video_embed(batch_data) cap_enc_outs = self.forward_text_embed(batch_data)", "vid_names, cap_names, scores = self.evaluate_scores(tst_reader) i2t_gts = [] for vid_name", "{ 'sent_embeds': sent_embeds, 'sent_lens': sent_lens, 'verb_embeds': verb_embeds, 'verb_lens': verb_lens, 'noun_embeds':", "/ union return score def forward_loss(self, batch_data, step=None): enc_outs =", "%.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item())) self.print_fn('\\tstep %d: sim_loss %.4f, vsim_loss %.4f, nsim_loss", "4 assert K == 7, 'Note that this error indicates", "similarity vid_2d_embeds = vid_embeds.view(-1, dim_embed) phrase_2d_embeds = phrase_embeds.view(-1, dim_embed) #", "jaccard similarity concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1])", "verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item())) self.print_fn('\\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(),", "self.max_violation = True self.loss_weights = None ## this config will", "noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device) node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n) rel_edges =", "-1e18) ################# vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2) if self.config.attn_fusion", "= { 'vid_names': vid_names, 'cap_names': cap_names, 'scores': scores, } return", "= intersection / union return score def forward_loss(self, batch_data, step=None):", "5, 6] fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2 metrics", "# sim, embed self.simattn_sigma = 4 self.hard_topk = 1 self.max_violation", "vid_noun_embeds, local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens) return { 'vid_sent_embeds':", "torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1) nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss = self.criterion(scores) if", "local_sent_embeds, sent_logits = self.submods[TXTENC]( sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges)", "vid_names: i2t_gts.append([]) for i, cap_name in enumerate(cap_names): if cap_name in", "component-wise similarity vid_2d_embeds = vid_embeds.view(-1, dim_embed) phrase_2d_embeds = phrase_embeds.view(-1, dim_embed)", "0) + np.mean(scores[3:5], 0))/2 metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts) if", "mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1) vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum) nbce_sent_loss", "= (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2 metrics = self.calculate_metrics(fused_scores, i2t_gts,", "vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds) word_attn_sims = torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds))", "shared ##### vid_lens = kwargs['vid_lens'] # (batch, ) num_frames =", "mu = 0.01 concept_verb_loss = 0.5*self.criterion(concept_verb_scores) concept_noun_loss = 0.5*self.criterion(concept_noun_scores) concept_loss", "= eta*self.criterion(scores2) verb_concept_label = 
torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask =", "%d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item())) self.print_fn('\\tstep %d: vbce_sent_loss", "= 0.01 concept_verb_loss = 0.5*self.criterion(concept_verb_scores) concept_noun_loss = 0.5*self.criterion(concept_noun_scores) concept_loss =", "(k, n_img, n_cap) return vid_names, cap_names, all_scores def evaluate(self, tst_reader,", "jaccard_sim(self, im, s): im_bs = im.size(0) s_bs = s.size(0) im", "self.config.subcfgs[VISENC].num_levels K = K + 4 assert K == 7,", "= self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores", "in vid_names: i2t_gts.append([]) for i, cap_name in enumerate(cap_names): if cap_name", "# (batch, num_nouns) noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(), self.config.num_nouns,", "fusion_loss + \\ self.config.loss_weights[1] * sent_loss + \\ self.config.loss_weights[2] *", "concept scores use jaccard similarity concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores", "for t_gt in t_gts: t2i_gts.setdefault(t_gt, []) t2i_gts[t_gt].append(i) idx = [0,", "= torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len noun_masks", "- phrase_embeds: (batch, num_phrases, embed_size) - phrase_masks: (batch, num_phrases) '''", "# (batch, num_verbs, dim_embed) verb_lens = kwargs['verb_lens'] # (batch, num_verbs)", "for k in range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj += 0 for k", "= np.concatenate(all_scores[k], axis=0) # (n_img, n_cap) all_scores = np.array(all_scores) #", "'verb_lens': verb_lens, 'noun_embeds': noun_embeds, 'noun_lens': noun_lens, 'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds,", "mask_flag=False): '''Args: - vid_embeds: (batch, num_frames, embed_size) - vid_masks: (batch,", "class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def build_submods(self): return { VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]), TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC])", "2) \\ / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return phrase_scores def generate_scores(self, **kwargs):", "%.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%( step, sent_loss.data.item(), verb_loss.data.item(),", "* verb_loss + \\ self.config.loss_weights[3] * noun_loss + \\ vbce_loss", "score def forward_loss(self, batch_data, step=None): enc_outs = self.forward_video_embed(batch_data) cap_enc_outs =", "'vid_noun_embeds': vid_noun_embeds, 'local_vid_embeds': local_sent_embeds, 'vid_lens': vid_lens, 'max_len': max_len, 'logits': logits,", "that this error indicates losing other scores!' 
vid_names, all_scores =", "torch.sum(noun_concept_mask, dim=1) vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1) vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum)", "2 sent_loss = self.criterion(sent_scores) verb_loss = self.criterion(verb_scores) noun_loss = self.criterion(noun_scores)", "sentence sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ##", "evaluate(self, tst_reader, return_outs=False): vid_names, cap_names, scores = self.evaluate_scores(tst_reader) i2t_gts =", "= torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2) if self.config.attn_fusion == 'embed': vid_attned_embeds", "= 1 self.max_violation = True self.loss_weights = None ## this", "(batch_vids, batch_phrases, num_frames, num_phrases) ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids, num_frames,", "self.config.attn_fusion == 'sim': # (batch_vids, batch_phrases, num_phrases) word_attn_sims = torch.sum(ground_sims", "max_len = self.submods[VISENC](vid_fts, vid_lens) return { 'vid_sent_embeds': vid_sent_embeds, 'vid_verb_embeds': vid_verb_embeds,", "neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10) self.print_fn('\\tstep %d: pos mean scores", "self.forward_video_embed(vid_data) for k in range(K): all_scores[k].append([]) ijj = 0 for", "phrase_2d_embeds).view( batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2) ### if mask_flag: vid_attn_per_word", "[]) t2i_gts[t_gt].append(i) idx = [0, 1, 2, 5, 6] fused_scores", "phrase_embeds: (batch, num_phrases, embed_size) - phrase_masks: (batch, num_phrases) ''' batch_vids,", "= kwargs['local_sent_embeds'] verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(), self.config.num_verbs, inverse=False)", "## sentence sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device)", "+ \\ vbce_loss + nbce_loss if step is not None", "sent_embeds: (batch, dim_embed) # verb_embeds, noun_embeds: (batch, num_xxx, dim_embed) sent_embeds,", "* phrase_masks.float().unsqueeze(0), 2) \\ / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return phrase_scores def", "in range(K): all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1) for k in range(K):", "self.forward_text_embed(cap_data) cap_enc_outs.update(vid_enc_outs) indv_scores = self.generate_scores(**cap_enc_outs) for k in range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy())", "for i, cap_name in enumerate(cap_names): if cap_name in tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i)", "self.print_fn('\\tstep %d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%(", "all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj += 0 for k in range(K): all_scores[k][-1] =", "i2t_gts = [] for vid_name in vid_names: i2t_gts.append([]) for i,", "i2t_gts, t2i_gts) if return_outs: outs = { 'vid_names': vid_names, 'cap_names':", "range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj += 0 for k in range(K): all_scores[k][-1]", "k in range(K): all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img, n_cap)", "(batch_vid, batch_phrases) phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \\ /", "cap_name in enumerate(cap_names): if cap_name in 
tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts =", "= cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) ####################################################### # concept scores use jaccard similarity", "self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True) ##### noun-level scores ##### vid_noun_embeds", "<reponame>Roc-Ng/HANet<gh_stars>10-100 import numpy as np import torch import framework.ops import", "dim_embed) # verb_embeds, noun_embeds: (batch, num_xxx, dim_embed) sent_embeds, verb_embeds, noun_embeds,", "num_xxx, dim_embed) sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC]( sent_ids,", "np.mean(scores[3:5], 0))/2 metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts) if return_outs: outs", "'sent_embeds': sent_embeds, 'sent_lens': sent_lens, 'verb_embeds': verb_embeds, 'verb_lens': verb_lens, 'noun_embeds': noun_embeds,", "verb_concept_label), dim=1) vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum) nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1)", "fusion_loss %.4f'%( step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item())) self.print_fn('\\tstep %d: vbce_loss", "cap_name in tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts = {} for i, t_gts", "tst_reader.dataset.captions for vid_data in tst_reader: vid_names.extend(vid_data['names']) vid_enc_outs = self.forward_video_embed(vid_data) for", "+= 0 for k in range(K): all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1)", "{} for i, t_gts in enumerate(i2t_gts): for t_gt in t_gts:", "None ## this config will be covered by model.json due", "== 0: neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10) self.print_fn('\\tstep %d: pos", "= self.evaluate_scores(tst_reader) i2t_gts = [] for vid_name in vid_names: i2t_gts.append([])", "sent_lens, verb_masks, noun_masks, node_roles, rel_edges) return { 'sent_embeds': sent_embeds, 'sent_lens':", "local_noun_scores = self.generate_scores(**enc_outs) scores = (sent_scores + verb_scores + noun_scores", "in t_gts: t2i_gts.setdefault(t_gt, []) t2i_gts[t_gt].append(i) idx = [0, 1, 2,", "vid_names, 'cap_names': cap_names, 'scores': scores, } return metrics, outs else:", "5 scores2 = (concept_verb_scores + concept_noun_scores) / 2 sent_loss =", "verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs) scores =", "4 self.hard_topk = 1 self.max_violation = True self.loss_weights = None", "word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2) # sum: (batch_vid, batch_phrases)", "this error indicates losing other scores!' 
vid_names, all_scores = [],", "= None ## this config will be covered by model.json", "vid_attn_per_word = ground_sims vid_attn_per_word[vid_attn_per_word < 0] = 0 vid_attn_per_word =", "ground_sims.masked_fill(vid_pad_masks, 0) ############## else: vid_attn_per_word = ground_sims vid_attn_per_word[vid_attn_per_word < 0]", "inverse=False) # sum: (batch_vids, batch_sents) noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds,", "# verb_embeds, noun_embeds: (batch, num_xxx, dim_embed) sent_embeds, verb_embeds, noun_embeds, local_sent_embeds,", "in range(K): all_scores[k].append([]) ijj = 0 for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size):", "vid_sent_embeds, 'vid_verb_embeds': vid_verb_embeds, 'vid_noun_embeds': vid_noun_embeds, 'local_vid_embeds': local_sent_embeds, 'vid_lens': vid_lens, 'max_len':", "dim_embed) sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC]( sent_ids, sent_lens,", "(batch_vids, batch_sents) verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks) ind_verb_scores =", "range(K): all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img, n_cap) all_scores =", "1 self.max_violation = True self.loss_weights = None ## this config", "%.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item())) self.print_fn('\\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss", "VISENC, TXTENC class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig): def __init__(self): super().__init__() self.num_verbs = 4", "else: vid_attn_per_word = ground_sims vid_attn_per_word[vid_attn_per_word < 0] = 0 vid_attn_per_word", "= vid_embeds.size() vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases, dim_embed", "= kwargs['noun_embeds'] # (batch, num_nouns, dim_embed) noun_lens = kwargs['noun_lens'] #", "mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss = self.criterion(scores) if self.config.loss_weights is None: loss =", "vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens) return {", "%d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item())) self.print_fn('\\tstep %d: sim_loss", "= torch.sum(verb_masks, 2) noun_lens = torch.sum(noun_masks, 2) # sent_embeds: (batch,", "s_bs = s.size(0) im = im.unsqueeze(1).expand(-1, s_bs, -1) s =", "(batch, num_frames, embed_size) - vid_masks: (batch, num_frames) - phrase_embeds: (batch,", "################# vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2) if self.config.attn_fusion ==", "model.json due to the functions of load and load_from_dict self.subcfgs[VISENC]", "# (batch, max_vis_len, dim_embed) vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len", "torch.min(im, s).sum(-1) union = torch.max(im, s).sum(-1) score = intersection /", "local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens) return { 'vid_sent_embeds': vid_sent_embeds,", "super().__init__() self.num_verbs = 4 self.num_nouns = 6 self.attn_fusion = 'embed'", "forward_text_embed(self, batch_data): sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device)", "concept_noun_scores) / 2 sent_loss = self.criterion(sent_scores) verb_loss = self.criterion(verb_scores) noun_loss", "* vid_attn_per_word, dim=2) # sum: 
(batch_vid, batch_phrases) phrase_scores = torch.sum(word_attn_sims", "ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks, True) return sent_scores, verb_scores,", "s.unsqueeze(0).expand(im_bs, -1, -1) intersection = torch.min(im, s).sum(-1) union = torch.max(im,", "= 0.5*self.criterion(concept_noun_scores) concept_loss = eta*self.criterion(scores2) verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label =", "loss = self.config.loss_weights[0] * fusion_loss + \\ self.config.loss_weights[1] * sent_loss", "im, s): im_bs = im.size(0) s_bs = s.size(0) im =", "0, 1).long(), self.config.num_nouns, inverse=False) # sum: (batch_vids, batch_sents) noun_scores =", "(sent_scores + verb_scores + noun_scores + local_verb_scores + local_noun_scores) /", "+ 4 assert K == 7, 'Note that this error", "= kwargs['noun_lens'] # (batch, num_nouns) noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0,", "sent_loss = self.criterion(sent_scores) verb_loss = self.criterion(verb_scores) noun_loss = self.criterion(noun_scores) eta", "= framework.ops.sequence_mask(vid_lens, num_frames, inverse=False) # batch*max_len ##### sentence-level scores #####", "= (sent_scores + verb_scores + noun_scores + local_verb_scores + local_noun_scores)", "use jaccard similarity concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores = self.jaccard_sim(kwargs['logits'][1],", "phrase_embeds, phrase_masks, mask_flag=False): '''Args: - vid_embeds: (batch, num_frames, embed_size) -", "dim_embed) # size = (batch_vids, batch_phrases, num_frames, num_phrases) ground_sims =", "phrase_masks: (batch, num_phrases) ''' batch_vids, num_frames, _ = vid_embeds.size() vid_pad_masks", "in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size): cap_enc_outs = self.forward_text_embed(cap_data) cap_enc_outs.update(vid_enc_outs) indv_scores = self.generate_scores(**cap_enc_outs) for", "'vid_verb_embeds': vid_verb_embeds, 'vid_noun_embeds': vid_noun_embeds, 'local_vid_embeds': local_sent_embeds, 'vid_lens': vid_lens, 'max_len': max_len,", "= kwargs['vid_lens'] # (batch, ) num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks =", "verb_loss + \\ self.config.loss_weights[3] * noun_loss + \\ vbce_loss +", "= torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device) # (batch, max_vis_len, dim_embed) vid_sent_embeds,", "(batch_vids, batch_phrases, num_phrases) word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2) #", "%d: pos mean scores %.2f, hard neg mean scores i2t", "if cap_name in tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts = {} for i,", "num_frames, _ = vid_embeds.size() vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3) batch_phrases,", "[0, 1, 2, 5, 6] fused_scores = (np.mean(scores[idx], 0) +", "torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length verb_masks =", "and load_from_dict self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel):", "im.unsqueeze(1).expand(-1, s_bs, -1) s = s.unsqueeze(0).expand(im_bs, -1, -1) intersection =", 
"- phrase_masks: (batch, num_phrases) ''' batch_vids, num_frames, _ = vid_embeds.size()", "scores = self.evaluate_scores(tst_reader) i2t_gts = [] for vid_name in vid_names:", "self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1]) ####################################################### ##### verb-level scores", "self.config.monitor_iter == 0: neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10) self.print_fn('\\tstep %d:", "torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \\ / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return phrase_scores", "framework.ops.sequence_mask(vid_lens, num_frames, inverse=False) # batch*max_len ##### sentence-level scores ##### sent_scores", "noun-level scores ##### vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed)", "i2t %.2f, t2i %.2f'%( step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]), torch.mean(torch.max(neg_scores, 0)[0])))", "self.submods[TXTENC]( sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges) return { 'sent_embeds':", "im.size(0) s_bs = s.size(0) im = im.unsqueeze(1).expand(-1, s_bs, -1) s", "noun_masks, node_roles, rel_edges) return { 'sent_embeds': sent_embeds, 'sent_lens': sent_lens, 'verb_embeds':", "num_phrases) ''' batch_vids, num_frames, _ = vid_embeds.size() vid_pad_masks = (vid_masks", "vid_2d_embeds = vid_embeds.view(-1, dim_embed) phrase_2d_embeds = phrase_embeds.view(-1, dim_embed) # size", "return_outs: outs = { 'vid_names': vid_names, 'cap_names': cap_names, 'scores': scores,", "batch_sents) noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks) ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1],", "RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def build_submods(self): return { VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]), TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]) }", "= s.unsqueeze(0).expand(im_bs, -1, -1) intersection = torch.min(im, s).sum(-1) union =", "phrase_masks.float().unsqueeze(0), 2) \\ / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return phrase_scores def generate_scores(self,", "def build_submods(self): return { VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]), TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]) } def", "compute component-wise similarity vid_2d_embeds = vid_embeds.view(-1, dim_embed) phrase_2d_embeds = phrase_embeds.view(-1,", "- vid_masks: (batch, num_frames) - phrase_embeds: (batch, num_phrases, embed_size) -", "2) noun_lens = torch.sum(noun_masks, 2) # sent_embeds: (batch, dim_embed) #", "(batch, num_frames, dim_embed) verb_embeds = kwargs['verb_embeds'] # (batch, num_verbs, dim_embed)", "error indicates losing other scores!' 
vid_names, all_scores = [], [[]", "kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed) verb_embeds = kwargs['verb_embeds'] # (batch,", "load_from_dict self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def", "dim=1) nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss = self.criterion(scores) if self.config.loss_weights is", "= phrase_embeds.size() # compute component-wise similarity vid_2d_embeds = vid_embeds.view(-1, dim_embed)", "/ torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return phrase_scores def generate_scores(self, **kwargs): ##### shared", "####################################################### # concept scores use jaccard similarity concept_verb_scores = self.jaccard_sim(kwargs['logits'][0],", "intersection / union return score def forward_loss(self, batch_data, step=None): enc_outs", "dim=1) nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1) vbce_sent_loss", "scores %.2f, hard neg mean scores i2t %.2f, t2i %.2f'%(", "step=None): enc_outs = self.forward_video_embed(batch_data) cap_enc_outs = self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores, verb_scores,", "noun_lens = kwargs['noun_lens'] # (batch, num_nouns) noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens >", "build_submods(self): return { VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]), TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]) } def forward_video_embed(self,", "vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens) return", "ind_noun_scores def jaccard_sim(self, im, s): im_bs = im.size(0) s_bs =", "= self.submods[TXTENC]( sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges) return {", "noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC]( sent_ids, sent_lens, verb_masks, noun_masks, node_roles,", "## batch*nv*max_sen_len noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device) node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n)", "def __init__(self): super().__init__() self.num_verbs = 4 self.num_nouns = 6 self.attn_fusion", "vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False) # batch*max_len ##### sentence-level scores", "scores ##### vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed) noun_embeds", "= self.forward_text_embed(cap_data) cap_enc_outs.update(vid_enc_outs) indv_scores = self.generate_scores(**cap_enc_outs) for k in range(K):", "verb_loss = self.criterion(verb_scores) noun_loss = self.criterion(noun_scores) eta = 0.1 mu", "def forward_video_embed(self, batch_data): vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device) #", "generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False): '''Args: - vid_embeds: (batch,", "for k in range(K): all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img,", "= self.forward_video_embed(vid_data) for k in range(K): all_scores[k].append([]) ijj = 0", "t2i_gts = {} for i, t_gts in enumerate(i2t_gts): 
for t_gt", "0.01 concept_verb_loss = 0.5*self.criterion(concept_verb_scores) concept_noun_loss = 0.5*self.criterion(concept_noun_scores) concept_loss = eta*self.criterion(scores2)", "verb_embeds, noun_embeds: (batch, num_xxx, dim_embed) sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits", "dim_embed) noun_embeds = kwargs['noun_embeds'] # (batch, num_nouns, dim_embed) noun_lens =", "length verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device) node_roles", "i2t_gts[-1].append(i) t2i_gts = {} for i, t_gts in enumerate(i2t_gts): for", "= torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length verb_masks", "(np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2 metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts)", "else: loss = self.config.loss_weights[0] * fusion_loss + \\ self.config.loss_weights[1] *", "step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]), torch.mean(torch.max(neg_scores, 0)[0]))) self.print_fn('\\tstep %d: sent_loss %.4f,", "torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds)) elif self.config.attn_fusion == 'sim': # (batch_vids, batch_phrases,", "True self.loss_weights = None ## this config will be covered", "_ = vid_embeds.size() vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases,", "framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds)) elif self.config.attn_fusion == 'sim': # (batch_vids, batch_phrases, num_phrases)", "{ 'vid_sent_embeds': vid_sent_embeds, 'vid_verb_embeds': vid_verb_embeds, 'vid_noun_embeds': vid_noun_embeds, 'local_vid_embeds': local_sent_embeds, 'vid_lens':", "for _ in range(K)] cap_names = tst_reader.dataset.captions for vid_data in", "= 0 for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size): cap_enc_outs = self.forward_text_embed(cap_data) cap_enc_outs.update(vid_enc_outs)", "concept_verb_loss.item(), concept_noun_loss.item())) return loss def evaluate_scores(self, tst_reader): K = self.config.subcfgs[VISENC].num_levels", "= (concept_verb_scores + concept_noun_scores) / 2 sent_loss = self.criterion(sent_scores) verb_loss", "t2vretrieval.encoders.mlvideo import t2vretrieval.models.globalmatch from t2vretrieval.models.criterion import cosine_sim from t2vretrieval.models.globalmatch import", "batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens = torch.sum(verb_masks, 2) noun_lens = torch.sum(noun_masks, 2) #", "nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1) vbce_sent_loss =", "vid_enc_outs = self.forward_video_embed(vid_data) for k in range(K): all_scores[k].append([]) ijj =", "\\ / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return phrase_scores def generate_scores(self, **kwargs): #####", "\\ self.config.loss_weights[3] * noun_loss + \\ vbce_loss + nbce_loss if", "/ 5 scores2 = (concept_verb_scores + concept_noun_scores) / 2 sent_loss", "noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores def jaccard_sim(self, im, s): im_bs", "ijj += 0 for k in range(K): all_scores[k][-1] = np.concatenate(all_scores[k][-1],", "dim_embed) verb_embeds = kwargs['verb_embeds'] # (batch, num_verbs, dim_embed) 
verb_lens =", "the functions of load and load_from_dict self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC]", "def evaluate(self, tst_reader, return_outs=False): vid_names, cap_names, scores = self.evaluate_scores(tst_reader) i2t_gts", "self.print_fn('\\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item())) self.print_fn('\\tstep %d:", "s.size(0) im = im.unsqueeze(1).expand(-1, s_bs, -1) s = s.unsqueeze(0).expand(im_bs, -1,", "(batch, num_verbs, dim_embed) verb_lens = kwargs['verb_lens'] # (batch, num_verbs) local_vid_embeds", "= torch.sum(ground_sims * vid_attn_per_word, dim=2) # sum: (batch_vid, batch_phrases) phrase_scores", "= mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss = self.criterion(scores) if self.config.loss_weights is None: loss", "ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2) ###", "torch.sum(verb_masks, 2) noun_lens = torch.sum(noun_masks, 2) # sent_embeds: (batch, dim_embed)", "inverse=False) # sum: (batch_vids, batch_sents) verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds,", "dim_embed = phrase_embeds.size() # compute component-wise similarity vid_2d_embeds = vid_embeds.view(-1,", "= int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False) # batch*max_len #####", "num_phrases).transpose(1, 2) ### if mask_flag: vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ##############", "'vid_lens': vid_lens, 'max_len': max_len, 'logits': logits, } def forward_text_embed(self, batch_data):", "cap_names, all_scores def evaluate(self, tst_reader, return_outs=False): vid_names, cap_names, scores =", "vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False): '''Args: - vid_embeds: (batch, num_frames,", "if return_outs: outs = { 'vid_names': vid_names, 'cap_names': cap_names, 'scores':", "indicates losing other scores!' vid_names, all_scores = [], [[] for", "[], [[] for _ in range(K)] cap_names = tst_reader.dataset.captions for", "= self.criterion(verb_scores) noun_loss = self.criterion(noun_scores) eta = 0.1 mu =", "losing other scores!' 
vid_names, all_scores = [], [[] for _", "self.forward_video_embed(batch_data) cap_enc_outs = self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores,", "= self.forward_video_embed(batch_data) cap_enc_outs = self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores, verb_scores, noun_scores, concept_verb_scores,", "phrase_embeds.view(-1, dim_embed) # size = (batch_vids, batch_phrases, num_frames, num_phrases) ground_sims", "self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores =", "num_frames, batch_phrases, num_phrases).transpose(1, 2) ### if mask_flag: vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks,", "== 'sim': # (batch_vids, batch_phrases, num_phrases) word_attn_sims = torch.sum(ground_sims *", "= self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True) ##### noun-level scores #####", "s).sum(-1) union = torch.max(im, s).sum(-1) score = intersection / union", "noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs) scores = (sent_scores", "##### noun-level scores ##### vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames,", "s): im_bs = im.size(0) s_bs = s.size(0) im = im.unsqueeze(1).expand(-1,", "0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases, dim_embed = phrase_embeds.size() # compute component-wise similarity", "to the functions of load and load_from_dict self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig()", "= t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def build_submods(self): return { VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]),", "size = (batch_vids, batch_phrases, num_frames, num_phrases) ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view(", "self.hard_topk = 1 self.max_violation = True self.loss_weights = None ##", "- vid_embeds: (batch, num_frames, embed_size) - vid_masks: (batch, num_frames) -", "= self.submods[VISENC](vid_fts, vid_lens) return { 'vid_sent_embeds': vid_sent_embeds, 'vid_verb_embeds': vid_verb_embeds, 'vid_noun_embeds':", "batch_data, step=None): enc_outs = self.forward_video_embed(batch_data) cap_enc_outs = self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores,", "n_cap) all_scores = np.array(all_scores) # (k, n_img, n_cap) return vid_names,", "logits, max_len = self.submods[VISENC](vid_fts, vid_lens) return { 'vid_sent_embeds': vid_sent_embeds, 'vid_verb_embeds':", "* sent_loss + \\ self.config.loss_weights[2] * verb_loss + \\ self.config.loss_weights[3]", "scores2 = (concept_verb_scores + concept_noun_scores) / 2 sent_loss = self.criterion(sent_scores)", "t_gts: t2i_gts.setdefault(t_gt, []) t2i_gts[t_gt].append(i) idx = [0, 1, 2, 5,", "= torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n) rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens", "noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(), self.config.num_nouns, inverse=False) # sum:", "0] = 0 vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2) if mask_flag: vid_attn_per_word", "= 4 self.num_nouns = 6 
self.attn_fusion = 'embed' # sim,", "(batch, num_xxx, dim_embed) sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC](", "batch_data): vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device) # (batch, max_vis_len,", "embed_size) - vid_masks: (batch, num_frames) - phrase_embeds: (batch, num_phrases, embed_size)", "batch*max_len ##### sentence-level scores ##### sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) #######################################################", ") num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False) #", "%.4f, nsim_loss %.4f'%(step, concept_loss.item(), concept_verb_loss.item(), concept_noun_loss.item())) return loss def evaluate_scores(self,", "0.5*self.criterion(concept_noun_scores) concept_loss = eta*self.criterion(scores2) verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device)", "vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item())) self.print_fn('\\tstep %d: sim_loss %.4f,", "verb_masks, noun_masks, node_roles, rel_edges) return { 'sent_embeds': sent_embeds, 'sent_lens': sent_lens,", "# concept scores use jaccard similarity concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0])", "t2vretrieval.encoders.mlsent import t2vretrieval.encoders.mlvideo import t2vretrieval.models.globalmatch from t2vretrieval.models.criterion import cosine_sim from", "##### sentence-level scores ##### sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) ####################################################### #", "t2i_gts[t_gt].append(i) idx = [0, 1, 2, 5, 6] fused_scores =", "[[] for _ in range(K)] cap_names = tst_reader.dataset.captions for vid_data", "(batch, num_phrases, embed_size) - phrase_masks: (batch, num_phrases) ''' batch_vids, num_frames,", "+ 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss else: loss = self.config.loss_weights[0] * fusion_loss", "= [] for vid_name in vid_names: i2t_gts.append([]) for i, cap_name", "(batch, ) num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False)", "self.print_fn('\\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item())) self.print_fn('\\tstep %d:", "= torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \\ / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return", "verb_lens, 'noun_embeds': noun_embeds, 'noun_lens': noun_lens, 'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds, }", "verb_masks) ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True) ##### noun-level", "import numpy as np import torch import framework.ops import t2vretrieval.encoders.mlsent", "vid_lens) return { 'vid_sent_embeds': vid_sent_embeds, 'vid_verb_embeds': vid_verb_embeds, 'vid_noun_embeds': vid_noun_embeds, 'local_vid_embeds':", "} def forward_video_embed(self, batch_data): vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device)", "union = torch.max(im, s).sum(-1) 
score = intersection / union return", "6 self.attn_fusion = 'embed' # sim, embed self.simattn_sigma = 4", "local_sent_embeds, } def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False): '''Args:", "num_phrases) word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2) # sum: (batch_vid,", "noun_scores + local_verb_scores + local_noun_scores) / 5 scores2 = (concept_verb_scores", "scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10) self.print_fn('\\tstep %d: pos mean scores %.2f, hard", "##### vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed) verb_embeds =", "noun_concept_label), dim=1) nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss = self.criterion(scores) if self.config.loss_weights", "evaluate_scores(self, tst_reader): K = self.config.subcfgs[VISENC].num_levels K = K + 4", "axis=0) # (n_img, n_cap) all_scores = np.array(all_scores) # (k, n_img,", "torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum = torch.sum(verb_concept_mask,", "node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n) rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n)", "import t2vretrieval.models.globalmatch from t2vretrieval.models.criterion import cosine_sim from t2vretrieval.models.globalmatch import VISENC,", "# (batch, num_nouns, dim_embed) noun_lens = kwargs['noun_lens'] # (batch, num_nouns)", "dim=1) n_mask_sum = torch.sum(noun_concept_mask, dim=1) vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1)", "other scores!' 
vid_names, all_scores = [], [[] for _ in", "= np.concatenate(all_scores[k][-1], axis=1) for k in range(K): all_scores[k] = np.concatenate(all_scores[k],", "tst_reader, return_outs=False): vid_names, cap_names, scores = self.evaluate_scores(tst_reader) i2t_gts = []", "**kwargs): ##### shared ##### vid_lens = kwargs['vid_lens'] # (batch, )", "(batch, num_verbs) local_vid_embeds =kwargs['local_vid_embeds'] local_sent_embeds = kwargs['local_sent_embeds'] verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens", "s).sum(-1) score = intersection / union return score def forward_loss(self,", "verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask", "tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts = {} for i, t_gts in enumerate(i2t_gts):", "> 0, 1).long(), self.config.num_nouns, inverse=False) # sum: (batch_vids, batch_sents) noun_scores", "config will be covered by model.json due to the functions", "sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC]( sent_ids, sent_lens, verb_masks,", "noun_embeds, 'noun_lens': noun_lens, 'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds, } def generate_phrase_scores(self,", "mask_flag: vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ############## else: vid_attn_per_word = ground_sims", "= framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(), self.config.num_verbs, inverse=False) # sum: (batch_vids,", "concept_loss = eta*self.criterion(scores2) verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask", "nbce_sent_loss.item())) self.print_fn('\\tstep %d: sim_loss %.4f, vsim_loss %.4f, nsim_loss %.4f'%(step, concept_loss.item(),", "##### vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed) noun_embeds =", "import framework.ops import t2vretrieval.encoders.mlsent import t2vretrieval.encoders.mlvideo import t2vretrieval.models.globalmatch from t2vretrieval.models.criterion", "def evaluate_scores(self, tst_reader): K = self.config.subcfgs[VISENC].num_levels K = K +", "True) ##### noun-level scores ##### vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch,", "np.array(all_scores) # (k, n_img, n_cap) return vid_names, cap_names, all_scores def", "\\ vbce_loss + nbce_loss if step is not None and", "torch.mean(torch.max(neg_scores, 1)[0]), torch.mean(torch.max(neg_scores, 0)[0]))) self.print_fn('\\tstep %d: sent_loss %.4f, verb_loss %.4f,", "nsim_loss %.4f'%(step, concept_loss.item(), concept_verb_loss.item(), concept_noun_loss.item())) return loss def evaluate_scores(self, tst_reader):", "0) ############## else: vid_attn_per_word = ground_sims vid_attn_per_word[vid_attn_per_word < 0] =", "4 self.num_nouns = 6 self.attn_fusion = 'embed' # sim, embed", "(batch, dim_embed) # verb_embeds, noun_embeds: (batch, num_xxx, dim_embed) sent_embeds, verb_embeds,", "= self.generate_scores(**enc_outs) scores = (sent_scores + verb_scores + noun_scores +", "hard neg mean scores i2t %.2f, t2i %.2f'%( step, torch.mean(torch.diag(scores)),", "self.attn_fusion = 'embed' # sim, embed self.simattn_sigma = 4 self.hard_topk", "eta*self.criterion(scores2) verb_concept_label = 
torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device)", "indv_scores = self.generate_scores(**cap_enc_outs) for k in range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj +=", "0 for k in range(K): all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1) for", "= ground_sims vid_attn_per_word[vid_attn_per_word < 0] = 0 vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word,", "torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1) vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum) nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label),", "concept_noun_scores, ind_verb_scores, ind_noun_scores def jaccard_sim(self, im, s): im_bs = im.size(0)", "word_attn_sims = torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds)) elif self.config.attn_fusion == 'sim': #", "verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC]( sent_ids, sent_lens, verb_masks, noun_masks,", "dim_embed) phrase_2d_embeds = phrase_embeds.view(-1, dim_embed) # size = (batch_vids, batch_phrases,", "ind_verb_scores, ind_noun_scores def jaccard_sim(self, im, s): im_bs = im.size(0) s_bs", "num_frames) - phrase_embeds: (batch, num_phrases, embed_size) - phrase_masks: (batch, num_phrases)", "nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item())) self.print_fn('\\tstep %d: sim_loss %.4f, vsim_loss %.4f,", "vid_names, cap_names, all_scores def evaluate(self, tst_reader, return_outs=False): vid_names, cap_names, scores", "self.criterion(verb_scores) noun_loss = self.criterion(noun_scores) eta = 0.1 mu = 0.01", "'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds, } def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds,", "= im.size(0) s_bs = s.size(0) im = im.unsqueeze(1).expand(-1, s_bs, -1)", "vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed) noun_embeds = kwargs['noun_embeds']", "############## else: vid_attn_per_word = ground_sims vid_attn_per_word[vid_attn_per_word < 0] = 0", "sum: (batch_vid, batch_phrases) phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \\", "> 0 and step % self.config.monitor_iter == 0: neg_scores =", "== 'embed': vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds) word_attn_sims = torch.einsum('abde,bde->abd',", "-1) intersection = torch.min(im, s).sum(-1) union = torch.max(im, s).sum(-1) score", "all_scores = [], [[] for _ in range(K)] cap_names =", "= framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(), self.config.num_nouns, inverse=False) # sum: (batch_vids,", "= 0.5*self.criterion(concept_verb_scores) concept_noun_loss = 0.5*self.criterion(concept_noun_scores) concept_loss = eta*self.criterion(scores2) verb_concept_label =", "-1) s = s.unsqueeze(0).expand(im_bs, -1, -1) intersection = torch.min(im, s).sum(-1)", "= torch.sum(noun_concept_mask, dim=1) vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1) vbce_loss =", "= np.array(all_scores) # (k, n_img, n_cap) return vid_names, cap_names, all_scores", "1).long(), self.config.num_verbs, inverse=False) # sum: (batch_vids, batch_sents) verb_scores = 
import numpy as np
import torch

import framework.ops
import t2vretrieval.encoders.mlsent
import t2vretrieval.encoders.mlvideo
import t2vretrieval.models.globalmatch

from t2vretrieval.models.criterion import cosine_sim
from t2vretrieval.models.globalmatch import VISENC, TXTENC


class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig):
  def __init__(self):
    super().__init__()
    self.num_verbs = 4
    self.num_nouns = 6

    self.attn_fusion = 'embed'  # sim, embed
    self.simattn_sigma = 4
    self.hard_topk = 1
    self.max_violation = True
    self.loss_weights = None

    ## this config will be covered by model.json due to the functions of load and load_from_dict
    self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig()
    self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig()


class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel):
  def build_submods(self):
    return {
      VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]),
      TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]),
    }

  def forward_video_embed(self, batch_data):
    vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device)
    vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device)
    # (batch, max_vis_len, dim_embed)
    vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len = \
      self.submods[VISENC](vid_fts, vid_lens)
    return {
      'vid_sent_embeds': vid_sent_embeds,
      'vid_verb_embeds': vid_verb_embeds,
      'vid_noun_embeds': vid_noun_embeds,
      'local_vid_embeds': local_sent_embeds,
      'vid_lens': vid_lens,
      'max_len': max_len,
      'logits': logits,
    }

  def forward_text_embed(self, batch_data):
    sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device)
    sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device)  ## length
    verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device)  ## batch*nv*max_sen_len
    noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device)
    node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device)  ## batch*(n_v+n_n)
    rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device)
    verb_lens = torch.sum(verb_masks, 2)
    noun_lens = torch.sum(noun_masks, 2)
    # sent_embeds: (batch, dim_embed)
    # verb_embeds, noun_embeds: (batch, num_xxx, dim_embed)
    sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC](
      sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges)
    return {
      'sent_embeds': sent_embeds, 'sent_lens': sent_lens,
      'verb_embeds': verb_embeds, 'verb_lens': verb_lens,
      'noun_embeds': noun_embeds, 'noun_lens': noun_lens,
      'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds,
    }

  def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False):
    '''Args:
      - vid_embeds: (batch, num_frames, embed_size)
      - vid_masks: (batch, num_frames)
      - phrase_embeds: (batch, num_phrases, embed_size)
      - phrase_masks: (batch, num_phrases)
    '''
    batch_vids, num_frames, dim_embed = vid_embeds.size()
    vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3)
    batch_phrases, num_phrases, _ = phrase_embeds.size()

    vid_2d_embeds = vid_embeds.view(-1, dim_embed)
    phrase_2d_embeds = phrase_embeds.view(-1, dim_embed)
    # size = (batch_vids, batch_phrases, num_frames, num_phrases)
    ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view(
      batch_vids, num_frames, batch_phrases, num_phrases).transpose(1, 2)

    vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0)
    vid_attn_per_word[vid_attn_per_word < 0] = 0
    vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2)
    if mask_flag:
      vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18)
    vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2)

    if self.config.attn_fusion == 'embed':
      vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds)
      word_attn_sims = torch.einsum('abde,bde->abd',
        framework.ops.l2norm(vid_attned_embeds),
        framework.ops.l2norm(phrase_embeds))
    elif self.config.attn_fusion == 'sim':
      # (batch_vids, batch_phrases, num_phrases)
      word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2)

    # sum: (batch_vid, batch_phrases)
    phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \
      / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1)
    return phrase_scores

  def jaccard_sim(self, im, s):
    im_bs = im.size(0)
    s_bs = s.size(0)
    im = im.unsqueeze(1).expand(-1, s_bs, -1)
    s = s.unsqueeze(0).expand(im_bs, -1, -1)
    intersection = torch.min(im, s).sum(-1)
    union = torch.max(im, s).sum(-1)
    score = intersection / union
    return score

  def generate_scores(self, **kwargs):
    ##### shared #####
    vid_lens = kwargs['vid_lens']  # (batch, )
    num_frames = int(kwargs['max_len'])  # kwargs['vid_verb_embeds'].size(1)
    vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False)

    ##### sentence-level scores #####
    sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds'])

    ##### concept scores #####
    concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0])
    concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1])

    ##### verb-level scores #####
    vid_verb_embeds = kwargs['vid_verb_embeds']  # (batch, num_frames, dim_embed)
    verb_embeds = kwargs['verb_embeds']  # (batch, num_verbs, dim_embed)
    verb_lens = kwargs['verb_lens']  # (batch, num_verbs)
    local_vid_embeds = kwargs['local_vid_embeds']
    local_sent_embeds = kwargs['local_sent_embeds']
    verb_masks = framework.ops.sequence_mask(
      torch.sum(verb_lens > 0, 1).long(), self.config.num_verbs, inverse=False)
    # sum: (batch_vids, batch_sents)
    verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks)
    ind_verb_scores = self.generate_phrase_scores(
      local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True)

    ##### noun-level scores #####
    vid_noun_embeds = kwargs['vid_noun_embeds']  # (batch, num_frames, dim_embed)
    noun_embeds = kwargs['noun_embeds']  # (batch, num_nouns, dim_embed)
    noun_lens = kwargs['noun_lens']  # (batch, num_nouns)
    noun_masks = framework.ops.sequence_mask(
      torch.sum(noun_lens > 0, 1).long(), self.config.num_nouns, inverse=False)
    # sum: (batch_vids, batch_sents)
    noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks)
    ind_noun_scores = self.generate_phrase_scores(
      local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks, True)

    return (sent_scores, verb_scores, noun_scores,
            concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores)

  def forward_loss(self, batch_data, step=None):
    enc_outs = self.forward_video_embed(batch_data)
    cap_enc_outs = self.forward_text_embed(batch_data)
    enc_outs.update(cap_enc_outs)

    sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, \
      local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs)
    scores = (sent_scores + verb_scores + noun_scores + local_verb_scores + local_noun_scores) / 5

    fusion_loss = self.criterion(scores)
    sent_loss = self.criterion(sent_scores)
    verb_loss = self.criterion(verb_scores)
    noun_loss = self.criterion(noun_scores)

    eta = 0.1
    mu = 0.01
    concept_verb_loss = eta * self.criterion(concept_verb_scores)
    concept_noun_loss = eta * self.criterion(concept_noun_scores)
    concept_loss = concept_verb_loss + concept_noun_loss

    verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device)
    noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device)
    verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device)
    noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device)
    v_mask_sum = torch.sum(verb_concept_mask, dim=1)
    n_mask_sum = torch.sum(noun_concept_mask, dim=1)

    vbce_loss = torch.sum(verb_concept_mask * self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1)
    vbce_loss = mu * torch.mean(vbce_loss / v_mask_sum)
    nbce_loss = torch.sum(noun_concept_mask * self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1)
    nbce_loss = mu * torch.mean(nbce_loss / n_mask_sum)
    vbce_sent_loss = torch.sum(verb_concept_mask * self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1)
    vbce_sent_loss = mu * torch.mean(vbce_sent_loss / v_mask_sum)
    nbce_sent_loss = torch.sum(noun_concept_mask * self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1)
    nbce_sent_loss = mu * torch.mean(nbce_sent_loss / n_mask_sum)

    if self.config.loss_weights is None:
      loss = fusion_loss + 1 * (vbce_loss + nbce_loss) + 1 * (vbce_sent_loss + nbce_sent_loss) + concept_loss
    else:
      loss = self.config.loss_weights[0] * fusion_loss + \
             self.config.loss_weights[1] * sent_loss + \
             self.config.loss_weights[2] * verb_loss + \
             self.config.loss_weights[3] * noun_loss + \
             vbce_loss + nbce_loss

    if step is not None and self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0:
      neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10)
      self.print_fn('\tstep %d: pos mean scores %.2f, neg mean scores i2t %.2f, t2i %.2f'%(
        step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]),
        torch.mean(torch.max(neg_scores, 0)[0])))
      self.print_fn('\tstep %d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%(
        step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item()))
      self.print_fn('\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item()))
      self.print_fn('\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item()))
      self.print_fn('\tstep %d: sim_loss %.4f, vsim_loss %.4f, nsim_loss %.4f'%(
        step, concept_loss.item(), concept_verb_loss.item(), concept_noun_loss.item()))
    return loss

  def evaluate_scores(self, tst_reader):
    K = self.config.subcfgs[VISENC].num_levels
    K = K + 4
    assert K == 7, 'Note that this error indicates losing other scores!'
    vid_names, all_scores = [], [[] for _ in range(K)]
    cap_names = tst_reader.dataset.captions
    for vid_data in tst_reader:
      vid_names.extend(vid_data['names'])
      vid_enc_outs = self.forward_video_embed(vid_data)
      for k in range(K):
        all_scores[k].append([])
      ijj = 0
      for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size):
        cap_enc_outs = self.forward_text_embed(cap_data)
        cap_enc_outs.update(vid_enc_outs)
        indv_scores = self.generate_scores(**cap_enc_outs)
        for k in range(K):
          all_scores[k][-1].append(indv_scores[k].data.cpu().numpy())
      for k in range(K):
        all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1)
    for k in range(K):
      all_scores[k] = np.concatenate(all_scores[k], axis=0)  # (n_img, n_cap)
    all_scores = np.array(all_scores)  # (k, n_img, n_cap)
    return vid_names, cap_names, all_scores

  def evaluate(self, tst_reader, return_outs=False):
    vid_names, cap_names, scores = self.evaluate_scores(tst_reader)

    i2t_gts = []
    for vid_name in vid_names:
      i2t_gts.append([])
      for i, cap_name in enumerate(cap_names):
        if cap_name in tst_reader.dataset.ref_captions[vid_name]:
          i2t_gts[-1].append(i)

    t2i_gts = {}
    for i, t_gts in enumerate(i2t_gts):
      for t_gt in t_gts:
        t2i_gts.setdefault(t_gt, [])
        t2i_gts[t_gt].append(i)

    idx = [0, 1, 2, 5, 6]
    fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0)) / 2
    metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts)
    if return_outs:
      outs = {
        'vid_names': vid_names,
        'cap_names': cap_names,
        'scores': scores,
      }
      return metrics, outs
    else:
      return metrics
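# Hedged illustration (editor's addition, not part of the file above): how evaluate() fuses
# the seven score matrices returned by generate_scores(). Indices 0-2 are the sentence/verb/
# noun matching scores, 3-4 the concept (Jaccard) scores and 5-6 the local scores; the random
# matrices below are placeholders standing in for 5 videos x 8 captions.
import numpy as np

scores = np.random.rand(7, 5, 8)            # (K, n_videos, n_captions)
idx = [0, 1, 2, 5, 6]
fused = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0)) / 2
print(fused.shape)                          # (5, 8), one fused score per video-caption pair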
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# login.py
# @Author : <NAME> (<EMAIL>)
# @Link   :
# @Date   : 12/12/2019, 11:43:07 AM

from typing import Optional, Any
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi import Header, Security
from authentication.models.users import User
from fastapi.security import HTTPBasic, HTTPBasicCredentials, APIKeyHeader
from typing import List
from starlette.responses import Response
from fastapi.encoders import jsonable_encoder
from authentication.interfaces.database import database
import jwt
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_401_UNAUTHORIZED
from datetime import datetime, timedelta
from hashlib import sha256
from authentication.interfaces.token import verify_token

router = APIRouter()
security = HTTPBasic(auto_error=True)
api_key = APIKeyHeader(name="x-api-key", auto_error=True)


@router.post("/login", tags=["token"])
async def renew_token(
    response: Response,
    user: dict = Depends(verify_token),
    x_api_key: str = Header(None),
):
    response.headers["x-api-key"] = x_api_key
    return {"verified": True, "user": user["email"]}


@router.put("/login", tags=["token"])
async def renew_token(response: Response, user: dict = Depends(verify_token)):
    sql = """UPDATE users.tbl_users SET token = :token WHERE id = :id"""
    token = f"{user['pwd_updated_at']}-{user['email']}-{datetime.now()}"
    mhash = sha256(token.encode("utf-8"))
    token = mhash.hexdigest()
    await database.execute(query=sql, values={"id": user["id"], "token": token})
    response.headers["x-api-key"] = jwt.encode(
        {**user, **dict(exp=(datetime.now() + timedelta(hours=8)))},
        token,
        algorithm="HS256",
    ).decode()
    return {"renew": True}


# @router.post("/login", dependencies=[Depends(verify_token)])
# async def renew_token(x_api_key: str = Header(None)):
#     return {"ok": x_api_key}


@router.get(
    "/login", response_model=User, tags=["auth"], response_model_exclude_unset=True
)
async def login_basic(
    response: Response, authorization: HTTPBasicCredentials = Security(security)
):
    sql = """SELECT tu.id, tu.email, tu."name", tu.linkedin_id , tu.pwd_updated_at
        FROM users.tbl_users tu
        WHERE tu.passwd is NOT NULL
        AND tu.passwd = crypt(:<PASSWORD>,tu.<PASSWORD>)
        AND tu.email = :email
        AND tu.enabled = true
    """
    users = await database.fetch_one(
        query=sql, values={"email": authorization.username, "secret": authorization.password},
    )
    if not users:
        raise HTTPException(status_code=HTTP_401_UNAUTHORIZED)
    user = jsonable_encoder(users)

    sql = """SELECT tp.acl_profile as profile
        FROM users.tbl_users tu
        inner join users.tbl_profile_users tpu on tpu.id_users = tu.id
        inner join users.tbl_profile tp on tp.id = tpu.id_profile
        WHERE tu.passwd is NOT NULL
        AND tu.passwd = <PASSWORD>(:<PASSWORD>,tu.<PASSWORD>)
        AND tu.email = :email"""
    profiles = await database.fetch_all(
        query=sql, values={"email": authorization.username, "secret": authorization.password},
    )
    if not profiles:
        raise HTTPException(status_code=HTTP_401_UNAUTHORIZED)
    user["acl"] = jsonable_encoder(profiles)

    sql = """UPDATE users.tbl_users SET token = :token WHERE id = :id"""
    token = f"{user['pwd_updated_at']}-{authorization.username}-{datetime.now()}"
    mhash = sha256(token.encode("utf-8"))
    token = mhash.hexdigest()
    await database.execute(query=sql, values={"id": user["id"], "token": token})
    response.headers["x-api-key"] = jwt.encode(
        {**user, **dict(exp=(datetime.now() + timedelta(hours=8)))},
        token,
        algorithm="HS256",
    ).decode()
    return user
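# Hedged usage sketch (editor's addition, not part of the router above): how a client might
# call the endpoints defined there. The base URL and credentials are placeholders; the only
# assumptions are the routes themselves (GET /login with HTTP Basic auth, POST /login with an
# x-api-key header) and the x-api-key response header set by login_basic().
import requests

BASE_URL = "http://localhost:8000"   # assumption: wherever this router is mounted

# Basic-auth login: returns the User payload and a JWT in the x-api-key response header.
resp = requests.get(f"{BASE_URL}/login", auth=("user@example.com", "secret"))
resp.raise_for_status()
api_key = resp.headers["x-api-key"]
user = resp.json()

# Token check: POST /login echoes the key back once verify_token accepts it.
check = requests.post(f"{BASE_URL}/login", headers={"x-api-key": api_key})
print(check.json())   # expected: {"verified": True, "user": "<email>"}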
# module which handles interaction with SITLs
from dronekit import Vehicle, VehicleMode, connect
from dronekit_sitl import SITL
from threading import Lock

import node, time
import mavparser
import threadrunner

drone_pool = {}
instance_count = 0
env_test = False
q = None
mq = None

lock = Lock()


class Sim(SITL, object):
    def __init__(self, instance=1, home=None):
        super(Sim, self).download("copter", "3.3", verbose=not env_test)
        self.instance = instance
        if home:
            self.home = home
        else:
            self.home = {"lat": 6.9271, "lon": 79.8612, "alt": 1}
        self.p = None
        return

    def connection_string(self):
        return super(Sim, self).connection_string()[:-4] + str(5760 + self.instance * 10)

    def launch(self):
        home_str = str(self.home['lat']) + ',' + str(self.home['lon']) + ',0,353'
        super(Sim, self).launch(["--instance", str(self.instance), "--home", home_str],
                                await_ready=True, verbose=not env_test)

    def get_sitl_status(self):
        return { 'id': self.instance, 'home': self.home }


def initialize():
    global q, mq, instance_count
    q = threadrunner.q
    mq = threadrunner.mq
    drones = node.get_drones()['drones']
    if not drones:
        return
    for drone_id in drones:
        if drone_id not in list(drone_pool.keys()):
            drone = node.get_drone_by_id(drone_id)
            location = drone['location']
            q.put((create_new_drone, { "db_key" : drone_id, "home" : location }))
            if 'status' in list(drone.keys()) and drone['status'] == 'FLYING':
                q.put((resume_flight, { "drone_id" : drone_id }))


def resume_flight(kwargs):
    drone_id = kwargs.get("drone_id", None)
    drone = node.get_drone_by_id(drone_id)
    waypoints = []
    for wp in sorted(drone['waypoints']):
        waypoints.append(drone['waypoints'][wp])
    next_waypoint = waypoints.index(drone['waypoint'])
    print (next_waypoint)
    q.put((takeoff_drone, { "drone_id" : drone_id, "waypoints" : waypoints[next_waypoint:] }))


def create_new_drone(kwargs):
    global instance_count
    instance_count += 1
    home = kwargs.get("home", None)
    db_key = kwargs.get("db_key", None)
    retries = 3
    drone = Sim(instance_count, home)
    drone.launch()
    while retries > 0:
        try:
            drone_conn = connect(drone.connection_string(), wait_ready=True)
            break
        except:
            print ("Retrying...")
            retries -= 1
    drone_pool[db_key] = drone_conn
    res = { "status" : "OK", "id" : db_key }
    return res


def remove_drone(kwargs):
    drone_id = kwargs.get("drone_id", None)
    if drone_id not in drone_pool:
        return { "status" : "ERROR", "msg" : "Drone instance not found" }
    drone = drone_pool[drone_id]
    if drone.mode == VehicleMode('AUTO'):
        return { "status" : "ERROR", "msg" : "Drone in operation" }
    # ...
    return { "status" : "OK", "id" : drone_id }


def run_mission(drone, target_height, waypoints):
    while True:
        print(("Reaching target alt : " + str(drone.location.global_relative_frame.alt)))
        if drone.location.global_relative_frame.alt >= target_height * 0.9:
            break
    print ('target alt reached')
    mavparser.create_mission(drone, waypoints)
    print ('mission acquired')
    drone.mode = VehicleMode('AUTO')
    print ('initiating sequence')
    print ('in mission')


def attach_listener(kwargs):
    attr = kwargs.get('attr', None)
    fn = kwargs.get('fn', None)
    attach_fn = kwargs.get('attach_fn', None)
    if not fn == None and not attr == None and not attach_fn == None:
        attach_fn(attr, fn)


def takeoff_drone(kwargs):
    global q
    drone_id = kwargs.get("drone_id", None)
    target_height = kwargs.get("target_height", 10)
    waypoints = kwargs.get("waypoints", None)
    try:
        drone = drone_pool[drone_id]
    except:
        raise
    drone.initialize()
    drone.mode = VehicleMode('GUIDED')
    drone.armed = True
    while not drone.armed:
        time.sleep(1)
    drone.simple_takeoff(target_height)
    print (waypoints)
    if waypoints:
        run_mission(drone, target_height, waypoints)

    def detach_event_listeners(drone, value, status):
        drone.remove_attribute_listener('location', update_location)
        drone.remove_attribute_listener('airspeed', update_airspeed)
        drone.remove_attribute_listener('attitude', udpate_attitude)
        drone.remove_attribute_listener('heading', update_heading)
        node.update_drone(drone_id, { "location" : {"lat": value.global_relative_frame.lat,
            "lon": value.global_relative_frame.lon, "alt": value.global_relative_frame.alt},
            "status": status})
        return

    def update_location(self, attr_name, value):
        node.update_drone(drone_id, { "location" : {"lat": value.global_relative_frame.lat,
            "lon": value.global_relative_frame.lon, "alt": value.global_relative_frame.alt},
            "status": "FLYING"})
        command_len = len(drone.commands)
        wp_len = len(waypoints)
        if command_len >= wp_len :
            diff = command_len - wp_len
            next_wp = max(drone.commands.__next__ - diff, 0) % len(waypoints)
            waypoint = waypoints[next_wp]
            # print "df: " + `diff`
            # print next_wp
            node.update_drone(drone_id, { "waypoint" : waypoint })
        if drone.mode == VehicleMode('LAND') and drone.location.global_relative_frame.alt <= 0.1:
            detach_event_listeners(drone, value, "HALTED")
            return
        if drone.commands.__next__ == len(drone.commands):
            detach_event_listeners(drone, value, "FINISHED")
            return

    def update_airspeed(self, attr_name, value):
        node.update_drone(drone_id, {"airspeed": value})

    def udpate_attitude(self, attr_name, value):
        node.update_drone(drone_id, { "pitch": value.pitch, 'roll': value.roll, 'yaw': value.yaw })

    def update_heading(self, attr_name, value):
        node.update_drone(drone_id, { "heading": value })

    mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'location', "fn" : update_location }))
    mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'airspeed', "fn" : update_airspeed }))
    mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'attitude', "fn" : udpate_attitude }))
    mq.put((attach_listener, { "attach_fn" : drone.add_attribute_listener, "attr" : 'heading', "fn" : update_heading }))

    print ('took off')
    return True


def land_drone(kwargs):
    drone_id = kwargs.get("drone_id", None)
    try:
        drone = drone_pool[drone_id]
    except:
        raise
    if not drone.armed:
        return False
    cmds = drone.commands
    cmds.wait_ready()
    cmds.clear()
    drone.mode = VehicleMode('LAND')
    print((drone.mode))
    return True
"run_mission(drone, target_height, waypoints): while True: print((\"Reaching target alt : \"", "= {} instance_count = 0 env_test = False q =", "+= 1 home = kwargs.get(\"home\", None) db_key = kwargs.get(\"db_key\", None)", "kwargs.get(\"drone_id\", None) target_height = kwargs.get(\"target_height\", 10) waypoints = kwargs.get(\"waypoints\", None)", "= kwargs.get(\"waypoints\", None) try: drone = drone_pool[drone_id] except: raise drone.initialize()", "not fn == None and not attr == None and", "drone = Sim(instance_count, home) drone.launch() while retries > 0: try:", "\"status\" : \"OK\", \"id\" : db_key } return res def", "for wp in sorted(drone['waypoints']): waypoints.append(drone['waypoints'][wp]) next_waypoint = waypoints.index(drone['waypoint']) print (next_waypoint)", "sequence') print ('in mission') def attach_listener(kwargs): attr = kwargs.get('attr', None)", "verbose=not env_test) def get_sitl_status(self): return { 'id': self.instance, 'home': self.home", "== VehicleMode('AUTO'): return { \"status\" : \"ERROR\", \"msg\" : \"Drone", "next_wp node.update_drone(drone_id, { \"waypoint\" : waypoint }) if drone.mode ==", "len(drone.commands): detach_event_listeners(drone, value, \"FINISHED\") return def update_airspeed(self, attr_name, value): node.update_drone(drone_id,", "mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'location', \"fn\" :", "drone.launch() while retries > 0: try: drone_conn = connect(drone.connection_string(), wait_ready=True)", "kwargs.get('fn', None) attach_fn = kwargs.get('attach_fn', None) if not fn ==", "value.global_relative_frame.alt}, \"status\": status}) return def update_location(self, attr_name, value): node.update_drone(drone_id, {", "instance_count instance_count += 1 home = kwargs.get(\"home\", None) db_key =", "not drone.armed: time.sleep(1) drone.simple_takeoff(target_height) print (waypoints) if waypoints: run_mission(drone, target_height,", "drone_id = kwargs.get(\"drone_id\", None) try: drone = drone_pool[drone_id] except: raise", "{ \"status\" : \"OK\", \"id\" : db_key } return res", "update_airspeed(self, attr_name, value): node.update_drone(drone_id, {\"airspeed\": value}) def udpate_attitude(self, attr_name, value):", ": \"OK\", \"id\" : db_key } return res def remove_drone(kwargs):", "connect from dronekit_sitl import SITL from threading import Lock import", "threadrunner.mq drones = node.get_drones()['drones'] if not drones: return for drone_id", "= drone_conn res = { \"status\" : \"OK\", \"id\" :", "= waypoints.index(drone['waypoint']) print (next_waypoint) q.put((takeoff_drone, { \"drone_id\" : drone_id, \"waypoints\"", "= VehicleMode('GUIDED') drone.armed = True while not drone.armed: time.sleep(1) drone.simple_takeoff(target_height)", "super(Sim, self).download(\"copter\", \"3.3\", verbose=not env_test) self.instance = instance if home:", "10) def launch(self): home_str = str(self.home['lat']) + ',' + str(self.home['lon'])", "kwargs.get(\"target_height\", 10) waypoints = kwargs.get(\"waypoints\", None) try: drone = drone_pool[drone_id]", "if not fn == None and not attr == None", "} def initialize(): global q, mq, instance_count q = threadrunner.q", ": drone.add_attribute_listener, \"attr\" : 'attitude', \"fn\" : udpate_attitude })) mq.put((attach_listener,", "False q = None mq = None lock = Lock()", "+ str(self.home['lon']) + ',0,353' super(Sim, self).launch([\"--instance\", str(self.instance), \"--home\", home_str], await_ready=True,", "\"db_key\" : drone_id, \"home\" : location })) if 'status' in", 
"= command_len - wp_len next_wp = max(drone.commands.__next__ - diff, 0)", ": drone.add_attribute_listener, \"attr\" : 'airspeed', \"fn\" : update_airspeed })) mq.put((attach_listener,", "self).download(\"copter\", \"3.3\", verbose=not env_test) self.instance = instance if home: self.home", "value.pitch, 'roll': value.roll, 'yaw': value.yaw }) def update_heading(self, attr_name, value):", "(waypoints) if waypoints: run_mission(drone, target_height, waypoints) def detach_event_listeners(drone, value, status):", "0.1: detach_event_listeners(drone, value, \"HALTED\") return if drone.commands.__next__ == len(drone.commands): detach_event_listeners(drone,", "return True def land_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) try: drone", "kwargs.get('attr', None) fn = kwargs.get('fn', None) attach_fn = kwargs.get('attach_fn', None)", "value }) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'location',", "value): node.update_drone(drone_id, { \"location\" : {\"lat\": value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon, \"alt\":", "{\"airspeed\": value}) def udpate_attitude(self, attr_name, value): node.update_drone(drone_id, { \"pitch\": value.pitch,", "None mq = None lock = Lock() class Sim(SITL, object):", "})) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'heading', \"fn\"", "'roll': value.roll, 'yaw': value.yaw }) def update_heading(self, attr_name, value): node.update_drone(drone_id,", "value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon, \"alt\": value.global_relative_frame.alt}, \"status\": \"FLYING\"}) command_len = len(drone.commands)", ": waypoints[next_waypoint:] })) def create_new_drone(kwargs): global instance_count instance_count += 1", "= 3 drone = Sim(instance_count, home) drone.launch() while retries >", "drone.commands.__next__ == len(drone.commands): detach_event_listeners(drone, value, \"FINISHED\") return def update_airspeed(self, attr_name,", "node.update_drone(drone_id, {\"airspeed\": value}) def udpate_attitude(self, attr_name, value): node.update_drone(drone_id, { \"pitch\":", "update_airspeed) drone.remove_attribute_listener('attitude', udpate_attitude) drone.remove_attribute_listener('heading', update_heading) node.update_drone(drone_id, { \"location\" : {\"lat\":", "update_heading })) print ('took off') return True def land_drone(kwargs): drone_id", "'FLYING': q.put((resume_flight, { \"drone_id\" : drone_id })) def resume_flight(kwargs): drone_id", "attr_name, value): node.update_drone(drone_id, {\"airspeed\": value}) def udpate_attitude(self, attr_name, value): node.update_drone(drone_id,", "True while not drone.armed: time.sleep(1) drone.simple_takeoff(target_height) print (waypoints) if waypoints:", "home=None): super(Sim, self).download(\"copter\", \"3.3\", verbose=not env_test) self.instance = instance if", "def remove_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) if drone_id not in", "kwargs.get(\"waypoints\", None) try: drone = drone_pool[drone_id] except: raise drone.initialize() drone.mode", "= waypoints[next_wp] # print \"df: \" + `diff` # print", "instance=1, home=None): super(Sim, self).download(\"copter\", \"3.3\", verbose=not env_test) self.instance = instance", "}) def update_heading(self, attr_name, value): node.update_drone(drone_id, { \"heading\": value })", "self).launch([\"--instance\", str(self.instance), \"--home\", home_str], await_ready=True, verbose=not env_test) def get_sitl_status(self): return", 
"udpate_attitude(self, attr_name, value): node.update_drone(drone_id, { \"pitch\": value.pitch, 'roll': value.roll, 'yaw':", "kwargs.get(\"drone_id\", None) if drone_id not in drone_pool: return { \"status\"", ": drone.add_attribute_listener, \"attr\" : 'heading', \"fn\" : update_heading })) print", "drone_pool[drone_id] return { \"status\" : \"OK\", \"id\" : drone_id }", "not found\" } drone = drone_pool[drone_id] if drone.mode == VehicleMode('AUTO'):", "= home else: self.home = {\"lat\":6.9271, \"lon\":79.8612, \"alt\": 1} self.p", "raise if not drone.armed: return False cmds = drone.commands cmds.wait_ready()", "waypoints.append(drone['waypoints'][wp]) next_waypoint = waypoints.index(drone['waypoint']) print (next_waypoint) q.put((takeoff_drone, { \"drone_id\" :", "\"id\" : db_key } return res def remove_drone(kwargs): drone_id =", "def launch(self): home_str = str(self.home['lat']) + ',' + str(self.home['lon']) +", "* 10) def launch(self): home_str = str(self.home['lat']) + ',' +", "not attr == None and not attach_fn == None: attach_fn(attr,", "None) retries = 3 drone = Sim(instance_count, home) drone.launch() while", "= connect(drone.connection_string(), wait_ready=True) break except: print (\"Retrying...\") retries -= 1", "if not drones: return for drone_id in drones: if drone_id", "def attach_listener(kwargs): attr = kwargs.get('attr', None) fn = kwargs.get('fn', None)", "{\"lat\": value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon, \"alt\": value.global_relative_frame.alt}, \"status\": \"FLYING\"}) command_len =", "next_wp = max(drone.commands.__next__ - diff, 0) % len(waypoints) waypoint =", ": 'location', \"fn\" : update_location })) mq.put((attach_listener, { \"attach_fn\" :", "drone_id } def run_mission(drone, target_height, waypoints): while True: print((\"Reaching target", "global q drone_id = kwargs.get(\"drone_id\", None) target_height = kwargs.get(\"target_height\", 10)", "not drones: return for drone_id in drones: if drone_id not", "'home': self.home } def initialize(): global q, mq, instance_count q", "* 0.9: break print ('target alt reached') mavparser.create_mission(drone, waypoints) print", "attach_fn(attr, fn) def takeoff_drone(kwargs): global q drone_id = kwargs.get(\"drone_id\", None)", "from threading import Lock import node, time import mavparser import", "= kwargs.get('attr', None) fn = kwargs.get('fn', None) attach_fn = kwargs.get('attach_fn',", "if drone.commands.__next__ == len(drone.commands): detach_event_listeners(drone, value, \"FINISHED\") return def update_airspeed(self,", "which handles interaction with SITLs from dronekit import Vehicle, VehicleMode,", "drone['location'] q.put((create_new_drone, { \"db_key\" : drone_id, \"home\" : location }))", "break except: print (\"Retrying...\") retries -= 1 drone_pool[db_key] = drone_conn", "drone.remove_attribute_listener('heading', update_heading) node.update_drone(drone_id, { \"location\" : {\"lat\": value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon,", "print ('mission acquired') drone.mode = VehicleMode('AUTO') print ('initiating sequence') print", "= drone_pool[drone_id] except: raise drone.initialize() drone.mode = VehicleMode('GUIDED') drone.armed =", "reached') mavparser.create_mission(drone, waypoints) print ('mission acquired') drone.mode = VehicleMode('AUTO') print", "{ \"status\" : \"OK\", \"id\" : drone_id } def run_mission(drone,", "drone_pool[drone_id] except: raise drone.initialize() drone.mode = VehicleMode('GUIDED') drone.armed = True", "import 
SITL from threading import Lock import node, time import", "def update_airspeed(self, attr_name, value): node.update_drone(drone_id, {\"airspeed\": value}) def udpate_attitude(self, attr_name,", "self.home = {\"lat\":6.9271, \"lon\":79.8612, \"alt\": 1} self.p = None return", "('in mission') def attach_listener(kwargs): attr = kwargs.get('attr', None) fn =", "drone.remove_attribute_listener('attitude', udpate_attitude) drone.remove_attribute_listener('heading', update_heading) node.update_drone(drone_id, { \"location\" : {\"lat\": value.global_relative_frame.lat,", "{ \"drone_id\" : drone_id })) def resume_flight(kwargs): drone_id = kwargs.get(\"drone_id\",", "\"attr\" : 'attitude', \"fn\" : udpate_attitude })) mq.put((attach_listener, { \"attach_fn\"", "drone_id = kwargs.get(\"drone_id\", None) target_height = kwargs.get(\"target_height\", 10) waypoints =", "{ \"db_key\" : drone_id, \"home\" : location })) if 'status'", "\"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'location', \"fn\" : update_location }))", "-= 1 drone_pool[db_key] = drone_conn res = { \"status\" :", "drone.mode = VehicleMode('GUIDED') drone.armed = True while not drone.armed: time.sleep(1)", "{ \"waypoint\" : waypoint }) if drone.mode == VehicleMode('LAND') and", "== None: attach_fn(attr, fn) def takeoff_drone(kwargs): global q drone_id =", "value.yaw }) def update_heading(self, attr_name, value): node.update_drone(drone_id, { \"heading\": value", "\"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'attitude', \"fn\" : udpate_attitude }))", "return { 'id': self.instance, 'home': self.home } def initialize(): global", "VehicleMode, connect from dronekit_sitl import SITL from threading import Lock", "return { \"status\" : \"OK\", \"id\" : drone_id } def", "\"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'airspeed', \"fn\" : update_airspeed }))", "wait_ready=True) break except: print (\"Retrying...\") retries -= 1 drone_pool[db_key] =", "import node, time import mavparser import threadrunner drone_pool = {}", "= kwargs.get(\"drone_id\", None) drone = node.get_drone_by_id(drone_id) waypoints = [] for", "= [] for wp in sorted(drone['waypoints']): waypoints.append(drone['waypoints'][wp]) next_waypoint = waypoints.index(drone['waypoint'])", "not drone.armed: return False cmds = drone.commands cmds.wait_ready() cmds.clear() drone.mode", "kwargs.get(\"home\", None) db_key = kwargs.get(\"db_key\", None) retries = 3 drone", "None return def connection_string(self): return super(Sim, self).connection_string()[:-4] + str(5760 +", "# print next_wp node.update_drone(drone_id, { \"waypoint\" : waypoint }) if", "try: drone = drone_pool[drone_id] except: raise if not drone.armed: return", "import threadrunner drone_pool = {} instance_count = 0 env_test =", "{ \"status\" : \"ERROR\", \"msg\" : \"Drone instance not found\"", "in list(drone.keys()) and drone['status'] == 'FLYING': q.put((resume_flight, { \"drone_id\" :", "time import mavparser import threadrunner drone_pool = {} instance_count =", "list(drone.keys()) and drone['status'] == 'FLYING': q.put((resume_flight, { \"drone_id\" : drone_id", ": 'heading', \"fn\" : update_heading })) print ('took off') return", "drone_id not in list(drone_pool.keys()): drone = node.get_drone_by_id(drone_id) location = drone['location']", "drone.location.global_relative_frame.alt <= 0.1: detach_event_listeners(drone, value, \"HALTED\") return if drone.commands.__next__ ==", ": db_key } return res def remove_drone(kwargs): drone_id = kwargs.get(\"drone_id\",", ": 
update_airspeed })) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\" :", "try: drone = drone_pool[drone_id] except: raise drone.initialize() drone.mode = VehicleMode('GUIDED')", "def takeoff_drone(kwargs): global q drone_id = kwargs.get(\"drone_id\", None) target_height =", "\"status\" : \"ERROR\", \"msg\" : \"Drone in operation\" } del", "for drone_id in drones: if drone_id not in list(drone_pool.keys()): drone", "remove_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) if drone_id not in drone_pool:", "def detach_event_listeners(drone, value, status): drone.remove_attribute_listener('location', update_location) drone.remove_attribute_listener('airspeed', update_airspeed) drone.remove_attribute_listener('attitude', udpate_attitude)", "and drone.location.global_relative_frame.alt <= 0.1: detach_event_listeners(drone, value, \"HALTED\") return if drone.commands.__next__", "\"3.3\", verbose=not env_test) self.instance = instance if home: self.home =", "drone = node.get_drone_by_id(drone_id) waypoints = [] for wp in sorted(drone['waypoints']):", "from dronekit_sitl import SITL from threading import Lock import node,", "instance_count q = threadrunner.q mq = threadrunner.mq drones = node.get_drones()['drones']", "VehicleMode('AUTO'): return { \"status\" : \"ERROR\", \"msg\" : \"Drone in", "{ 'id': self.instance, 'home': self.home } def initialize(): global q,", "connection_string(self): return super(Sim, self).connection_string()[:-4] + str(5760 + self.instance * 10)", "',' + str(self.home['lon']) + ',0,353' super(Sim, self).launch([\"--instance\", str(self.instance), \"--home\", home_str],", "q = None mq = None lock = Lock() class", "return def connection_string(self): return super(Sim, self).connection_string()[:-4] + str(5760 + self.instance", "'id': self.instance, 'home': self.home } def initialize(): global q, mq,", "not in list(drone_pool.keys()): drone = node.get_drone_by_id(drone_id) location = drone['location'] q.put((create_new_drone,", "res = { \"status\" : \"OK\", \"id\" : db_key }", "str(drone.location.global_relative_frame.alt))) if drone.location.global_relative_frame.alt >= target_height * 0.9: break print ('target", "{\"lat\": value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon, \"alt\": value.global_relative_frame.alt}, \"status\": status}) return def", "value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon, \"alt\": value.global_relative_frame.alt}, \"status\": status}) return def update_location(self,", "db_key } return res def remove_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None)", "instance_count += 1 home = kwargs.get(\"home\", None) db_key = kwargs.get(\"db_key\",", "run_mission(drone, target_height, waypoints) def detach_event_listeners(drone, value, status): drone.remove_attribute_listener('location', update_location) drone.remove_attribute_listener('airspeed',", "drone['status'] == 'FLYING': q.put((resume_flight, { \"drone_id\" : drone_id })) def", "<= 0.1: detach_event_listeners(drone, value, \"HALTED\") return if drone.commands.__next__ == len(drone.commands):", "acquired') drone.mode = VehicleMode('AUTO') print ('initiating sequence') print ('in mission')", "None) db_key = kwargs.get(\"db_key\", None) retries = 3 drone =", "status): drone.remove_attribute_listener('location', update_location) drone.remove_attribute_listener('airspeed', update_airspeed) drone.remove_attribute_listener('attitude', udpate_attitude) drone.remove_attribute_listener('heading', update_heading) 
node.update_drone(drone_id,", "instance not found\" } drone = drone_pool[drone_id] if drone.mode ==", "= kwargs.get(\"home\", None) db_key = kwargs.get(\"db_key\", None) retries = 3", "def __init__(self, instance=1, home=None): super(Sim, self).download(\"copter\", \"3.3\", verbose=not env_test) self.instance", "\"msg\" : \"Drone in operation\" } del drone_pool[drone_id] return {", "'location', \"fn\" : update_location })) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener,", "waypoints): while True: print((\"Reaching target alt : \" + str(drone.location.global_relative_frame.alt)))", "home else: self.home = {\"lat\":6.9271, \"lon\":79.8612, \"alt\": 1} self.p =", "\"fn\" : update_location })) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\"", "location })) if 'status' in list(drone.keys()) and drone['status'] == 'FLYING':", "node.update_drone(drone_id, { \"pitch\": value.pitch, 'roll': value.roll, 'yaw': value.yaw }) def", "threadrunner drone_pool = {} instance_count = 0 env_test = False", "home: self.home = home else: self.home = {\"lat\":6.9271, \"lon\":79.8612, \"alt\":", "# print \"df: \" + `diff` # print next_wp node.update_drone(drone_id,", "fn = kwargs.get('fn', None) attach_fn = kwargs.get('attach_fn', None) if not", "drone.add_attribute_listener, \"attr\" : 'location', \"fn\" : update_location })) mq.put((attach_listener, {", "verbose=not env_test) self.instance = instance if home: self.home = home", "del drone_pool[drone_id] return { \"status\" : \"OK\", \"id\" : drone_id", "location = drone['location'] q.put((create_new_drone, { \"db_key\" : drone_id, \"home\" :", "not in drone_pool: return { \"status\" : \"ERROR\", \"msg\" :", "= kwargs.get(\"target_height\", 10) waypoints = kwargs.get(\"waypoints\", None) try: drone =", "= 0 env_test = False q = None mq =", "= True while not drone.armed: time.sleep(1) drone.simple_takeoff(target_height) print (waypoints) if", "\"status\": status}) return def update_location(self, attr_name, value): node.update_drone(drone_id, { \"location\"", "return super(Sim, self).connection_string()[:-4] + str(5760 + self.instance * 10) def", "Sim(SITL, object): def __init__(self, instance=1, home=None): super(Sim, self).download(\"copter\", \"3.3\", verbose=not", "len(drone.commands) wp_len = len(waypoints) if command_len >= wp_len : diff", "\"waypoints\" : waypoints[next_waypoint:] })) def create_new_drone(kwargs): global instance_count instance_count +=", "q = threadrunner.q mq = threadrunner.mq drones = node.get_drones()['drones'] if", ": drone_id })) def resume_flight(kwargs): drone_id = kwargs.get(\"drone_id\", None) drone", "\"location\" : {\"lat\": value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon, \"alt\": value.global_relative_frame.alt}, \"status\": status})", "+ str(drone.location.global_relative_frame.alt))) if drone.location.global_relative_frame.alt >= target_height * 0.9: break print", "None) try: drone = drone_pool[drone_id] except: raise if not drone.armed:", "drone.armed = True while not drone.armed: time.sleep(1) drone.simple_takeoff(target_height) print (waypoints)", "node.update_drone(drone_id, { \"location\" : {\"lat\": value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon, \"alt\": value.global_relative_frame.alt},", "})) print ('took off') return True def land_drone(kwargs): drone_id =", "= kwargs.get(\"db_key\", None) retries = 3 drone = Sim(instance_count, home)", ": {\"lat\": value.global_relative_frame.lat, \"lon\": 
value.global_relative_frame.lon, \"alt\": value.global_relative_frame.alt}, \"status\": \"FLYING\"}) command_len", "mission') def attach_listener(kwargs): attr = kwargs.get('attr', None) fn = kwargs.get('fn',", ": 'airspeed', \"fn\" : update_airspeed })) mq.put((attach_listener, { \"attach_fn\" :", "drone_id })) def resume_flight(kwargs): drone_id = kwargs.get(\"drone_id\", None) drone =", "('target alt reached') mavparser.create_mission(drone, waypoints) print ('mission acquired') drone.mode =", "}) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'location', \"fn\"", "kwargs.get(\"drone_id\", None) try: drone = drone_pool[drone_id] except: raise if not", "== len(drone.commands): detach_event_listeners(drone, value, \"FINISHED\") return def update_airspeed(self, attr_name, value):", "status}) return def update_location(self, attr_name, value): node.update_drone(drone_id, { \"location\" :", "\"attr\" : 'location', \"fn\" : update_location })) mq.put((attach_listener, { \"attach_fn\"", "1} self.p = None return def connection_string(self): return super(Sim, self).connection_string()[:-4]", "('mission acquired') drone.mode = VehicleMode('AUTO') print ('initiating sequence') print ('in", "= max(drone.commands.__next__ - diff, 0) % len(waypoints) waypoint = waypoints[next_wp]", ": update_heading })) print ('took off') return True def land_drone(kwargs):", "if drone_id not in list(drone_pool.keys()): drone = node.get_drone_by_id(drone_id) location =", "= drone['location'] q.put((create_new_drone, { \"db_key\" : drone_id, \"home\" : location", "if command_len >= wp_len : diff = command_len - wp_len", "q.put((create_new_drone, { \"db_key\" : drone_id, \"home\" : location })) if", "return if drone.commands.__next__ == len(drone.commands): detach_event_listeners(drone, value, \"FINISHED\") return def", "env_test) self.instance = instance if home: self.home = home else:", "attr_name, value): node.update_drone(drone_id, { \"pitch\": value.pitch, 'roll': value.roll, 'yaw': value.yaw", "break print ('target alt reached') mavparser.create_mission(drone, waypoints) print ('mission acquired')", "print \"df: \" + `diff` # print next_wp node.update_drone(drone_id, {", "initialize(): global q, mq, instance_count q = threadrunner.q mq =", "\"OK\", \"id\" : db_key } return res def remove_drone(kwargs): drone_id", "target_height * 0.9: break print ('target alt reached') mavparser.create_mission(drone, waypoints)", "drone_id, \"home\" : location })) if 'status' in list(drone.keys()) and", "{ \"pitch\": value.pitch, 'roll': value.roll, 'yaw': value.yaw }) def update_heading(self,", "retries -= 1 drone_pool[db_key] = drone_conn res = { \"status\"", "self).connection_string()[:-4] + str(5760 + self.instance * 10) def launch(self): home_str", "\"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'heading', \"fn\" : update_heading }))", "detach_event_listeners(drone, value, status): drone.remove_attribute_listener('location', update_location) drone.remove_attribute_listener('airspeed', update_airspeed) drone.remove_attribute_listener('attitude', udpate_attitude) drone.remove_attribute_listener('heading',", "value}) def udpate_attitude(self, attr_name, value): node.update_drone(drone_id, { \"pitch\": value.pitch, 'roll':", "import mavparser import threadrunner drone_pool = {} instance_count = 0", ": drone_id, \"home\" : location })) if 'status' in list(drone.keys())", "udpate_attitude })) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 
'heading',", "Lock import node, time import mavparser import threadrunner drone_pool =", "\"attr\" : 'airspeed', \"fn\" : update_airspeed })) mq.put((attach_listener, { \"attach_fn\"", "drone.simple_takeoff(target_height) print (waypoints) if waypoints: run_mission(drone, target_height, waypoints) def detach_event_listeners(drone,", "Lock() class Sim(SITL, object): def __init__(self, instance=1, home=None): super(Sim, self).download(\"copter\",", "node.get_drone_by_id(drone_id) location = drone['location'] q.put((create_new_drone, { \"db_key\" : drone_id, \"home\"", ": \"Drone in operation\" } del drone_pool[drone_id] return { \"status\"", "print ('in mission') def attach_listener(kwargs): attr = kwargs.get('attr', None) fn", "drone_pool = {} instance_count = 0 env_test = False q", "= len(waypoints) if command_len >= wp_len : diff = command_len", "wp_len : diff = command_len - wp_len next_wp = max(drone.commands.__next__", "{ \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'attitude', \"fn\" : udpate_attitude", "in drone_pool: return { \"status\" : \"ERROR\", \"msg\" : \"Drone", "mq = threadrunner.mq drones = node.get_drones()['drones'] if not drones: return", "str(self.home['lon']) + ',0,353' super(Sim, self).launch([\"--instance\", str(self.instance), \"--home\", home_str], await_ready=True, verbose=not", "= Lock() class Sim(SITL, object): def __init__(self, instance=1, home=None): super(Sim,", ": \"OK\", \"id\" : drone_id } def run_mission(drone, target_height, waypoints):", "drone_id, \"waypoints\" : waypoints[next_waypoint:] })) def create_new_drone(kwargs): global instance_count instance_count", ": \"ERROR\", \"msg\" : \"Drone instance not found\" } drone", "== None and not attach_fn == None: attach_fn(attr, fn) def", "resume_flight(kwargs): drone_id = kwargs.get(\"drone_id\", None) drone = node.get_drone_by_id(drone_id) waypoints =", "\"OK\", \"id\" : drone_id } def run_mission(drone, target_height, waypoints): while", ": \"Drone instance not found\" } drone = drone_pool[drone_id] if", "\"ERROR\", \"msg\" : \"Drone in operation\" } del drone_pool[drone_id] return", "create_new_drone(kwargs): global instance_count instance_count += 1 home = kwargs.get(\"home\", None)", "get_sitl_status(self): return { 'id': self.instance, 'home': self.home } def initialize():", "\"drone_id\" : drone_id })) def resume_flight(kwargs): drone_id = kwargs.get(\"drone_id\", None)", "drone = drone_pool[drone_id] except: raise if not drone.armed: return False", "dronekit import Vehicle, VehicleMode, connect from dronekit_sitl import SITL from", "\" + str(drone.location.global_relative_frame.alt))) if drone.location.global_relative_frame.alt >= target_height * 0.9: break", "= False q = None mq = None lock =", "home_str = str(self.home['lat']) + ',' + str(self.home['lon']) + ',0,353' super(Sim,", "waypoints[next_waypoint:] })) def create_new_drone(kwargs): global instance_count instance_count += 1 home", "diff = command_len - wp_len next_wp = max(drone.commands.__next__ - diff,", "value): node.update_drone(drone_id, { \"heading\": value }) mq.put((attach_listener, { \"attach_fn\" :", "drone.mode == VehicleMode('AUTO'): return { \"status\" : \"ERROR\", \"msg\" :", "None) target_height = kwargs.get(\"target_height\", 10) waypoints = kwargs.get(\"waypoints\", None) try:", "home = kwargs.get(\"home\", None) db_key = kwargs.get(\"db_key\", None) retries =", "attr = kwargs.get('attr', None) fn = kwargs.get('fn', None) attach_fn =", "\"Drone instance not found\" } drone = drone_pool[drone_id] if drone.mode", 
"value.global_relative_frame.alt}, \"status\": \"FLYING\"}) command_len = len(drone.commands) wp_len = len(waypoints) if", "\"lon\": value.global_relative_frame.lon, \"alt\": value.global_relative_frame.alt}, \"status\": status}) return def update_location(self, attr_name,", "\"home\" : location })) if 'status' in list(drone.keys()) and drone['status']", "drone.remove_attribute_listener('location', update_location) drone.remove_attribute_listener('airspeed', update_airspeed) drone.remove_attribute_listener('attitude', udpate_attitude) drone.remove_attribute_listener('heading', update_heading) node.update_drone(drone_id, {", "str(5760 + self.instance * 10) def launch(self): home_str = str(self.home['lat'])", ": 'attitude', \"fn\" : udpate_attitude })) mq.put((attach_listener, { \"attach_fn\" :", "instance_count = 0 env_test = False q = None mq", "value.roll, 'yaw': value.yaw }) def update_heading(self, attr_name, value): node.update_drone(drone_id, {", "} return res def remove_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) if", "None and not attr == None and not attach_fn ==", "mavparser.create_mission(drone, waypoints) print ('mission acquired') drone.mode = VehicleMode('AUTO') print ('initiating", "drone_pool[drone_id] if drone.mode == VehicleMode('AUTO'): return { \"status\" : \"ERROR\",", "off') return True def land_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) try:", "= { \"status\" : \"OK\", \"id\" : db_key } return", "= VehicleMode('AUTO') print ('initiating sequence') print ('in mission') def attach_listener(kwargs):", "\"waypoint\" : waypoint }) if drone.mode == VehicleMode('LAND') and drone.location.global_relative_frame.alt", "command_len >= wp_len : diff = command_len - wp_len next_wp", "while not drone.armed: time.sleep(1) drone.simple_takeoff(target_height) print (waypoints) if waypoints: run_mission(drone,", "SITL from threading import Lock import node, time import mavparser", "def update_location(self, attr_name, value): node.update_drone(drone_id, { \"location\" : {\"lat\": value.global_relative_frame.lat,", "None) try: drone = drone_pool[drone_id] except: raise drone.initialize() drone.mode =", "<reponame>dilinade/DroneSym<filename>dronesym-python/flask-api/src/dronepool.py #DronePool module which handles interaction with SITLs from dronekit", "None) attach_fn = kwargs.get('attach_fn', None) if not fn == None", "print (next_waypoint) q.put((takeoff_drone, { \"drone_id\" : drone_id, \"waypoints\" : waypoints[next_waypoint:]", "in list(drone_pool.keys()): drone = node.get_drone_by_id(drone_id) location = drone['location'] q.put((create_new_drone, {", "value, \"FINISHED\") return def update_airspeed(self, attr_name, value): node.update_drone(drone_id, {\"airspeed\": value})", "{ \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'heading', \"fn\" : update_heading", "str(self.instance), \"--home\", home_str], await_ready=True, verbose=not env_test) def get_sitl_status(self): return {", "return { \"status\" : \"ERROR\", \"msg\" : \"Drone instance not", "q drone_id = kwargs.get(\"drone_id\", None) target_height = kwargs.get(\"target_height\", 10) waypoints", "print (\"Retrying...\") retries -= 1 drone_pool[db_key] = drone_conn res =", ": \"ERROR\", \"msg\" : \"Drone in operation\" } del drone_pool[drone_id]", "node, time import mavparser import threadrunner drone_pool = {} instance_count", "target_height = kwargs.get(\"target_height\", 10) waypoints = kwargs.get(\"waypoints\", None) try: drone", "drone_id = kwargs.get(\"drone_id\", None) drone = 
node.get_drone_by_id(drone_id) waypoints = []", "handles interaction with SITLs from dronekit import Vehicle, VehicleMode, connect", "drone = drone_pool[drone_id] if drone.mode == VehicleMode('AUTO'): return { \"status\"", "detach_event_listeners(drone, value, \"FINISHED\") return def update_airspeed(self, attr_name, value): node.update_drone(drone_id, {\"airspeed\":", "target alt : \" + str(drone.location.global_relative_frame.alt))) if drone.location.global_relative_frame.alt >= target_height", "update_location) drone.remove_attribute_listener('airspeed', update_airspeed) drone.remove_attribute_listener('attitude', udpate_attitude) drone.remove_attribute_listener('heading', update_heading) node.update_drone(drone_id, { \"location\"", "await_ready=True, verbose=not env_test) def get_sitl_status(self): return { 'id': self.instance, 'home':", "class Sim(SITL, object): def __init__(self, instance=1, home=None): super(Sim, self).download(\"copter\", \"3.3\",", "= drone_pool[drone_id] if drone.mode == VehicleMode('AUTO'): return { \"status\" :", "True def land_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) try: drone =", "= {\"lat\":6.9271, \"lon\":79.8612, \"alt\": 1} self.p = None return def", ": drone_id } def run_mission(drone, target_height, waypoints): while True: print((\"Reaching", "connect(drone.connection_string(), wait_ready=True) break except: print (\"Retrying...\") retries -= 1 drone_pool[db_key]", "mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'airspeed', \"fn\" :", "attr_name, value): node.update_drone(drone_id, { \"location\" : {\"lat\": value.global_relative_frame.lat, \"lon\": value.global_relative_frame.lon,", "- diff, 0) % len(waypoints) waypoint = waypoints[next_wp] # print", "return for drone_id in drones: if drone_id not in list(drone_pool.keys()):", "db_key = kwargs.get(\"db_key\", None) retries = 3 drone = Sim(instance_count,", "command_len - wp_len next_wp = max(drone.commands.__next__ - diff, 0) %", "1 home = kwargs.get(\"home\", None) db_key = kwargs.get(\"db_key\", None) retries", "takeoff_drone(kwargs): global q drone_id = kwargs.get(\"drone_id\", None) target_height = kwargs.get(\"target_height\",", "\"Drone in operation\" } del drone_pool[drone_id] return { \"status\" :", "else: self.home = {\"lat\":6.9271, \"lon\":79.8612, \"alt\": 1} self.p = None", "def create_new_drone(kwargs): global instance_count instance_count += 1 home = kwargs.get(\"home\",", "q.put((resume_flight, { \"drone_id\" : drone_id })) def resume_flight(kwargs): drone_id =", "drone.armed: time.sleep(1) drone.simple_takeoff(target_height) print (waypoints) if waypoints: run_mission(drone, target_height, waypoints)", "\" + `diff` # print next_wp node.update_drone(drone_id, { \"waypoint\" :", "\"ERROR\", \"msg\" : \"Drone instance not found\" } drone =", "list(drone_pool.keys()): drone = node.get_drone_by_id(drone_id) location = drone['location'] q.put((create_new_drone, { \"db_key\"", "waypoints = kwargs.get(\"waypoints\", None) try: drone = drone_pool[drone_id] except: raise", "= None lock = Lock() class Sim(SITL, object): def __init__(self,", ": waypoint }) if drone.mode == VehicleMode('LAND') and drone.location.global_relative_frame.alt <=", "{ \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'airspeed', \"fn\" : update_airspeed", "{ \"status\" : \"ERROR\", \"msg\" : \"Drone in operation\" }", "print ('initiating sequence') print ('in mission') def attach_listener(kwargs): attr =", "alt reached') mavparser.create_mission(drone, 
waypoints) print ('mission acquired') drone.mode = VehicleMode('AUTO')", "\"status\" : \"ERROR\", \"msg\" : \"Drone instance not found\" }", "raise drone.initialize() drone.mode = VehicleMode('GUIDED') drone.armed = True while not", "})) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'airspeed', \"fn\"", "drone.initialize() drone.mode = VehicleMode('GUIDED') drone.armed = True while not drone.armed:", "home_str], await_ready=True, verbose=not env_test) def get_sitl_status(self): return { 'id': self.instance,", "land_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) try: drone = drone_pool[drone_id] except:", "(next_waypoint) q.put((takeoff_drone, { \"drone_id\" : drone_id, \"waypoints\" : waypoints[next_waypoint:] }))", "operation\" } del drone_pool[drone_id] return { \"status\" : \"OK\", \"id\"", "drone.add_attribute_listener, \"attr\" : 'airspeed', \"fn\" : update_airspeed })) mq.put((attach_listener, {", "#DronePool module which handles interaction with SITLs from dronekit import", "drone.location.global_relative_frame.alt >= target_height * 0.9: break print ('target alt reached')", "waypoint }) if drone.mode == VehicleMode('LAND') and drone.location.global_relative_frame.alt <= 0.1:", "len(waypoints) waypoint = waypoints[next_wp] # print \"df: \" + `diff`", "0) % len(waypoints) waypoint = waypoints[next_wp] # print \"df: \"", "try: drone_conn = connect(drone.connection_string(), wait_ready=True) break except: print (\"Retrying...\") retries", "sorted(drone['waypoints']): waypoints.append(drone['waypoints'][wp]) next_waypoint = waypoints.index(drone['waypoint']) print (next_waypoint) q.put((takeoff_drone, { \"drone_id\"", "\"status\": \"FLYING\"}) command_len = len(drone.commands) wp_len = len(waypoints) if command_len", "waypoints) print ('mission acquired') drone.mode = VehicleMode('AUTO') print ('initiating sequence')", "__init__(self, instance=1, home=None): super(Sim, self).download(\"copter\", \"3.3\", verbose=not env_test) self.instance =", "drone.add_attribute_listener, \"attr\" : 'attitude', \"fn\" : udpate_attitude })) mq.put((attach_listener, {", "\"fn\" : update_heading })) print ('took off') return True def", "if drone_id not in drone_pool: return { \"status\" : \"ERROR\",", "global q, mq, instance_count q = threadrunner.q mq = threadrunner.mq", "drone_pool[db_key] = drone_conn res = { \"status\" : \"OK\", \"id\"", "'heading', \"fn\" : update_heading })) print ('took off') return True", "== None and not attr == None and not attach_fn", "= threadrunner.mq drones = node.get_drones()['drones'] if not drones: return for", "} del drone_pool[drone_id] return { \"status\" : \"OK\", \"id\" :", "0 env_test = False q = None mq = None", "(\"Retrying...\") retries -= 1 drone_pool[db_key] = drone_conn res = {", "'airspeed', \"fn\" : update_airspeed })) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener,", "node.update_drone(drone_id, { \"heading\": value }) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener,", "= kwargs.get('fn', None) attach_fn = kwargs.get('attach_fn', None) if not fn", ": \" + str(drone.location.global_relative_frame.alt))) if drone.location.global_relative_frame.alt >= target_height * 0.9:", "drone = node.get_drone_by_id(drone_id) location = drone['location'] q.put((create_new_drone, { \"db_key\" :", "return res def remove_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) if drone_id", "})) def resume_flight(kwargs): drone_id = kwargs.get(\"drone_id\", None) drone = 
node.get_drone_by_id(drone_id)", "{ \"drone_id\" : drone_id, \"waypoints\" : waypoints[next_waypoint:] })) def create_new_drone(kwargs):", "len(waypoints) if command_len >= wp_len : diff = command_len -", "waypoints[next_wp] # print \"df: \" + `diff` # print next_wp", "- wp_len next_wp = max(drone.commands.__next__ - diff, 0) % len(waypoints)", "= Sim(instance_count, home) drone.launch() while retries > 0: try: drone_conn", "in operation\" } del drone_pool[drone_id] return { \"status\" : \"OK\",", "+ ',' + str(self.home['lon']) + ',0,353' super(Sim, self).launch([\"--instance\", str(self.instance), \"--home\",", "mq, instance_count q = threadrunner.q mq = threadrunner.mq drones =", "'attitude', \"fn\" : udpate_attitude })) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener,", "with SITLs from dronekit import Vehicle, VehicleMode, connect from dronekit_sitl", "target_height, waypoints) def detach_event_listeners(drone, value, status): drone.remove_attribute_listener('location', update_location) drone.remove_attribute_listener('airspeed', update_airspeed)", "super(Sim, self).connection_string()[:-4] + str(5760 + self.instance * 10) def launch(self):", "self.instance, 'home': self.home } def initialize(): global q, mq, instance_count", "None) drone = node.get_drone_by_id(drone_id) waypoints = [] for wp in", "drone.add_attribute_listener, \"attr\" : 'heading', \"fn\" : update_heading })) print ('took", "mavparser import threadrunner drone_pool = {} instance_count = 0 env_test", "{ \"attach_fn\" : drone.add_attribute_listener, \"attr\" : 'location', \"fn\" : update_location", "\"fn\" : update_airspeed })) mq.put((attach_listener, { \"attach_fn\" : drone.add_attribute_listener, \"attr\"", "def land_drone(kwargs): drone_id = kwargs.get(\"drone_id\", None) try: drone = drone_pool[drone_id]" ]
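A minimal sketch of how these task functions are expected to be driven, assuming the threadrunner worker queues are running and node points at a reachable datastore; the drone id, home coordinates, and waypoint format below are placeholder assumptions, not part of the original module.

# Hypothetical driver code (assumes threadrunner.q / threadrunner.mq are live queues
# consumed by worker threads; "drone-1", the coordinates, and the waypoint dict
# shape are illustrative placeholders).
import threadrunner
import dronepool

dronepool.initialize()

# Spawn a simulated vehicle, fly a two-waypoint mission, then land it.
threadrunner.q.put((dronepool.create_new_drone,
                    {"db_key": "drone-1",
                     "home": {"lat": 6.9271, "lon": 79.8612, "alt": 1}}))
threadrunner.q.put((dronepool.takeoff_drone,
                    {"drone_id": "drone-1", "target_height": 10,
                     "waypoints": [{"lat": 6.9275, "lon": 79.8615},
                                   {"lat": 6.9280, "lon": 79.8620}]}))
threadrunner.q.put((dronepool.land_drone, {"drone_id": "drone-1"}))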
[ "1 not in pool: continue for word in pool[key]: for", "pool[key - 1]: dp[word] = max(dp.get(word, 1), dp.get(tmp, 1) +", "# j += 1 # # if length2 - length1", "# length = len(words) # if length < 2: #", "< 2: return length pool = collections.defaultdict(list) # 将字符串按照其长度进行分组 dp", "length2 - length1 == 1 and i == length1: #", "== length1: # # return True # # return False", "class Solution: # # way 1 # def longestStrChain(self, words:", "in sorted(pool.keys()): if key - 1 not in pool: continue", "return True # # return False # # word2 去除任意一个位置的字符后与", "len(words) # if length < 2: # return length #", "@Title: 最长字符串链 (Longest String Chain) # @Author: KivenC # @Date:", "words[i]): # dp[i] = max(dp[i], dp[j] + 1) # return", "way 1 # def longestStrChain(self, words: List[str]) -> int: #", "dp = {} for word in words: pool[len(word)].append(word) for key", "if len(words[i]) - len(words[j]) > 1: # 剪枝 # break", "if word1[i] == word2[j]: # # i += 1 #", "return length # dp = [1 for _ in range(length)]", "for _ in range(length)] # words.sort(key=len) # 按字符串长度递增排序 # for", "if word2[: i] + word2[i + 1:] == word1: #", "# def isPre(self, word1: str, word2: str) -> bool: #", "False # way 2 def longestStrChain(self, words: List[str]) -> int:", "= max(dp.get(word, 1), dp.get(tmp, 1) + 1) return max(dp.values()) if", "# dp = [1 for _ in range(length)] # words.sort(key=len)", "for j in range(i - 1, -1, -1): # if", "# if length2 - length1 == 1 and i ==", "dp[word] = max(dp.get(word, 1), dp.get(tmp, 1) + 1) return max(dp.values())", "in pool[key - 1]: dp[word] = max(dp.get(word, 1), dp.get(tmp, 1)", "collections length = len(words) if length < 2: return length", "@Runtime: 144 ms # @Memory: 13.3 MB class Solution: #", "# if len(word1) + 1 != len(word2): # return False", "for key in sorted(pool.keys()): if key - 1 not in", "KivenC # @Date: 2019-05-26 20:35:25 # @Runtime: 144 ms #", "length2 = 0, 0, len(word1), len(word2) # # while i", "sorted(pool.keys()): if key - 1 not in pool: continue for", "break # if len(words[i]) == len(words[j]): # continue # if", "length = len(words) # if length < 2: # return", "len(word1) + 1 != len(word2): # return False # for", "1 != len(word2): # return False # for i in", "# i, j, length1, length2 = 0, 0, len(word1), len(word2)", "True # return False # way 2 def longestStrChain(self, words:", "< length1 and j < length2: # # if word1[i]", "[1 for _ in range(length)] # words.sort(key=len) # 按字符串长度递增排序 #", "word1 是否是 word2 的前身 # # 双指针 # # i,", "def longestStrChain(self, words: List[str]) -> int: # # 动态规划 #", "- length1 == 1 and i == length1: # #", "2: return length pool = collections.defaultdict(list) # 将字符串按照其长度进行分组 dp =", "# 判断 word1 是否是 word2 的前身 # # 双指针 #", "i < length1 and j < length2: # # if", "isPre(self, word1: str, word2: str) -> bool: # # 判断", "not in pool: continue for word in pool[key]: for j", "False # # word2 去除任意一个位置的字符后与 word1 进行比对 # if len(word1)", "# way 1 # def longestStrChain(self, words: List[str]) -> int:", "word2 去除任意一个位置的字符后与 word1 进行比对 # if len(word1) + 1 !=", "return length pool = collections.defaultdict(list) # 将字符串按照其长度进行分组 dp = {}", "0, len(word1), len(word2) # # while i < length1 and", "# return True # return False # way 2 def", "# if word1[i] == word2[j]: # # i += 1", "# for i in range(1, length): # if i >=", "按字符串长度递增排序 # for i in range(1, length): # if i", "dp = [1 for _ in range(length)] # words.sort(key=len) #", "way 2 def longestStrChain(self, words: List[str]) -> int: import collections", "= {} for word in words: pool[len(word)].append(word) for key 
in", "return False # for i in range(len(word2)): # if word2[:", "length1 and j < length2: # # if word1[i] ==", "i] + word2[i + 1:] == word1: # return True", "-> bool: # # 判断 word1 是否是 word2 的前身 #", "if self.isPre(words[j], words[i]): # dp[i] = max(dp[i], dp[j] + 1)", "< i 且 words[j] 是 words[i] 的前身) # length =", "# if i >= 1 and words[i] == words[i -", "range(1, length): # if i >= 1 and words[i] ==", "+ 1 != len(word2): # return False # for i", "且 words[j] 是 words[i] 的前身) # length = len(words) #", "判断 word1 是否是 word2 的前身 # # 双指针 # #", "去除任意一个位置的字符后与 word1 进行比对 # if len(word1) + 1 != len(word2):", "word2 的前身 # # 双指针 # # i, j, length1,", "length2: # # if word1[i] == word2[j]: # # i", "# words.sort(key=len) # 按字符串长度递增排序 # for i in range(1, length):", "# 双指针 # # i, j, length1, length2 = 0,", "i += 1 # # j += 1 # #", "if length < 2: # return length # dp =", "# @Title: 最长字符串链 (Longest String Chain) # @Author: KivenC #", "1 and i == length1: # # return True #", "continue # for j in range(i - 1, -1, -1):", "length # dp = [1 for _ in range(length)] #", "bool: # # 判断 word1 是否是 word2 的前身 # #", "+ 1) (0 <= j < i 且 words[j] 是", "dp[i] = max(dp[i], dp[j] + 1) # return max(dp) #", "and words[i] == words[i - 1]: # 去重 # continue", "i 且 words[j] 是 words[i] 的前身) # length = len(words)", "的前身 # # 双指针 # # i, j, length1, length2", "!= len(word2): # return False # for i in range(len(word2)):", "return False # # word2 去除任意一个位置的字符后与 word1 进行比对 # if", "pool: continue for word in pool[key]: for j in range(key):", "while i < length1 and j < length2: # #", "13.3 MB class Solution: # # way 1 # def", "1 # # j += 1 # # if length2", "(Longest String Chain) # @Author: KivenC # @Date: 2019-05-26 20:35:25", "Chain) # @Author: KivenC # @Date: 2019-05-26 20:35:25 # @Runtime:", "# if len(words[i]) == len(words[j]): # continue # if self.isPre(words[j],", "+ word[j + 1:] if tmp in pool[key - 1]:", "# @Memory: 13.3 MB class Solution: # # way 1", "True # # return False # # word2 去除任意一个位置的字符后与 word1", "word[: j] + word[j + 1:] if tmp in pool[key", "in range(1, length): # if i >= 1 and words[i]", "in range(i - 1, -1, -1): # if len(words[i]) -", "and j < length2: # # if word1[i] == word2[j]:", "List[str]) -> int: import collections length = len(words) if length", "# if word2[: i] + word2[i + 1:] == word1:", "range(i - 1, -1, -1): # if len(words[i]) - len(words[j])", "len(word2): # return False # for i in range(len(word2)): #", "for i in range(len(word2)): # if word2[: i] + word2[i", "key - 1 not in pool: continue for word in", "> 1: # 剪枝 # break # if len(words[i]) ==", "最长字符串链 (Longest String Chain) # @Author: KivenC # @Date: 2019-05-26", "words[i] 的前身) # length = len(words) # if length <", "max(dp) # def isPre(self, word1: str, word2: str) -> bool:", "dp[j] + 1) # return max(dp) # def isPre(self, word1:", "是否是 word2 的前身 # # 双指针 # # i, j,", "in range(length)] # words.sort(key=len) # 按字符串长度递增排序 # for i in", "# # while i < length1 and j < length2:", "+= 1 # # j += 1 # # if", "word1[i] == word2[j]: # # i += 1 # #", "# word2 去除任意一个位置的字符后与 word1 进行比对 # if len(word1) + 1", "in range(key): tmp = word[: j] + word[j + 1:]", "_ in range(length)] # words.sort(key=len) # 按字符串长度递增排序 # for i", "= max(dp[i], dp[j] + 1) (0 <= j < i", "ms # @Memory: 13.3 MB class Solution: # # way", "length < 2: return length pool = collections.defaultdict(list) # 将字符串按照其长度进行分组", "pool[len(word)].append(word) for key in sorted(pool.keys()): if key - 1 not", "= 0, 0, len(word1), len(word2) # # while i <", "1 # # if length2 - length1 == 1 and", "dp[j] + 1) (0 <= j < i 且 
words[j]", "<= j < i 且 words[j] 是 words[i] 的前身) #", "range(len(word2)): # if word2[: i] + word2[i + 1:] ==", "= max(dp[i], dp[j] + 1) # return max(dp) # def", "< length2: # # if word1[i] == word2[j]: # #", "len(word1), len(word2) # # while i < length1 and j", "1:] if tmp in pool[key - 1]: dp[word] = max(dp.get(word,", "< 2: # return length # dp = [1 for", "import collections length = len(words) if length < 2: return", "i in range(1, length): # if i >= 1 and", "if tmp in pool[key - 1]: dp[word] = max(dp.get(word, 1),", "1]: dp[word] = max(dp.get(word, 1), dp.get(tmp, 1) + 1) return", "max(dp[i], dp[j] + 1) # return max(dp) # def isPre(self,", "collections.defaultdict(list) # 将字符串按照其长度进行分组 dp = {} for word in words:", "word in pool[key]: for j in range(key): tmp = word[:", "longestStrChain(self, words: List[str]) -> int: import collections length = len(words)", "len(word2) # # while i < length1 and j <", "# # if length2 - length1 == 1 and i", "# continue # if self.isPre(words[j], words[i]): # dp[i] = max(dp[i],", "= len(words) # if length < 2: # return length", "for word in words: pool[len(word)].append(word) for key in sorted(pool.keys()): if", "# i += 1 # # j += 1 #", "len(words[i]) - len(words[j]) > 1: # 剪枝 # break #", "# # dp[i] = max(dp[i], dp[j] + 1) (0 <=", "return max(dp) # def isPre(self, word1: str, word2: str) ->", "continue for word in pool[key]: for j in range(key): tmp", "def isPre(self, word1: str, word2: str) -> bool: # #", "length < 2: # return length # dp = [1", "i >= 1 and words[i] == words[i - 1]: #", "length pool = collections.defaultdict(list) # 将字符串按照其长度进行分组 dp = {} for", "== 1 and i == length1: # # return True", "# # way 1 # def longestStrChain(self, words: List[str]) ->", "# return False # # word2 去除任意一个位置的字符后与 word1 进行比对 #", "# return length # dp = [1 for _ in", "in words: pool[len(word)].append(word) for key in sorted(pool.keys()): if key -", "- 1 not in pool: continue for word in pool[key]:", "# continue # for j in range(i - 1, -1,", "in range(len(word2)): # if word2[: i] + word2[i + 1:]", "max(dp[i], dp[j] + 1) (0 <= j < i 且", "in pool[key]: for j in range(key): tmp = word[: j]", "@Author: KivenC # @Date: 2019-05-26 20:35:25 # @Runtime: 144 ms", "是 words[i] 的前身) # length = len(words) # if length", "(0 <= j < i 且 words[j] 是 words[i] 的前身)", "{} for word in words: pool[len(word)].append(word) for key in sorted(pool.keys()):", "# 去重 # continue # for j in range(i -", "longestStrChain(self, words: List[str]) -> int: # # 动态规划 # #", "= word[: j] + word[j + 1:] if tmp in", "key in sorted(pool.keys()): if key - 1 not in pool:", "# break # if len(words[i]) == len(words[j]): # continue #", "length1, length2 = 0, 0, len(word1), len(word2) # # while", "# return True # # return False # # word2", "word2: str) -> bool: # # 判断 word1 是否是 word2", "# # 判断 word1 是否是 word2 的前身 # # 双指针", "# if self.isPre(words[j], words[i]): # dp[i] = max(dp[i], dp[j] +", "1:] == word1: # return True # return False #", "j < i 且 words[j] 是 words[i] 的前身) # length", "# # return True # # return False # #", "# 按字符串长度递增排序 # for i in range(1, length): # if", "@Memory: 13.3 MB class Solution: # # way 1 #", "+ 1) # return max(dp) # def isPre(self, word1: str,", "= collections.defaultdict(list) # 将字符串按照其长度进行分组 dp = {} for word in", "word1 进行比对 # if len(word1) + 1 != len(word2): #", "144 ms # @Memory: 13.3 MB class Solution: # #", "words[i] == words[i - 1]: # 去重 # continue #", "- 1, -1, -1): # if len(words[i]) - len(words[j]) >", "1), dp.get(tmp, 1) + 1) return max(dp.values()) if dp else", "dp[i] = max(dp[i], 
dp[j] + 1) (0 <= j <", "words[j] 是 words[i] 的前身) # length = len(words) # if", "pool = collections.defaultdict(list) # 将字符串按照其长度进行分组 dp = {} for word", "word1: # return True # return False # way 2", "# return max(dp) # def isPre(self, word1: str, word2: str)", "pool[key]: for j in range(key): tmp = word[: j] +", "def longestStrChain(self, words: List[str]) -> int: import collections length =", "# return False # for i in range(len(word2)): # if", "+= 1 # # if length2 - length1 == 1", "== len(words[j]): # continue # if self.isPre(words[j], words[i]): # dp[i]", "word[j + 1:] if tmp in pool[key - 1]: dp[word]", "+ word2[i + 1:] == word1: # return True #", "len(words[j]): # continue # if self.isPre(words[j], words[i]): # dp[i] =", "j] + word[j + 1:] if tmp in pool[key -", "if len(words[i]) == len(words[j]): # continue # if self.isPre(words[j], words[i]):", "j < length2: # # if word1[i] == word2[j]: #", "= len(words) if length < 2: return length pool =", "tmp = word[: j] + word[j + 1:] if tmp", "# if len(words[i]) - len(words[j]) > 1: # 剪枝 #", "words: List[str]) -> int: # # 动态规划 # # dp[i]", "1 and words[i] == words[i - 1]: # 去重 #", "for j in range(key): tmp = word[: j] + word[j", "去重 # continue # for j in range(i - 1,", "int: import collections length = len(words) if length < 2:", "if length2 - length1 == 1 and i == length1:", "range(key): tmp = word[: j] + word[j + 1:] if", "-1): # if len(words[i]) - len(words[j]) > 1: # 剪枝", "# while i < length1 and j < length2: #", "== word2[j]: # # i += 1 # # j", "# # word2 去除任意一个位置的字符后与 word1 进行比对 # if len(word1) +", "if length < 2: return length pool = collections.defaultdict(list) #", "j, length1, length2 = 0, 0, len(word1), len(word2) # #", "# # return False # # word2 去除任意一个位置的字符后与 word1 进行比对", "word2[i + 1:] == word1: # return True # return", "# # 动态规划 # # dp[i] = max(dp[i], dp[j] +", "# def longestStrChain(self, words: List[str]) -> int: # # 动态规划", "1, -1, -1): # if len(words[i]) - len(words[j]) > 1:", "word in words: pool[len(word)].append(word) for key in sorted(pool.keys()): if key", "str) -> bool: # # 判断 word1 是否是 word2 的前身", "- 1]: dp[word] = max(dp.get(word, 1), dp.get(tmp, 1) + 1)", "words: List[str]) -> int: import collections length = len(words) if", "= [1 for _ in range(length)] # words.sort(key=len) # 按字符串长度递增排序", "word2[: i] + word2[i + 1:] == word1: # return", "+ 1:] if tmp in pool[key - 1]: dp[word] =", "# @Runtime: 144 ms # @Memory: 13.3 MB class Solution:", "# 剪枝 # break # if len(words[i]) == len(words[j]): #", "and i == length1: # # return True # #", "j in range(i - 1, -1, -1): # if len(words[i])", "-> int: import collections length = len(words) if length <", "# way 2 def longestStrChain(self, words: List[str]) -> int: import", ">= 1 and words[i] == words[i - 1]: # 去重", "i in range(len(word2)): # if word2[: i] + word2[i +", "# for j in range(i - 1, -1, -1): #", "length): # if i >= 1 and words[i] == words[i", "的前身) # length = len(words) # if length < 2:", "剪枝 # break # if len(words[i]) == len(words[j]): # continue", "# 将字符串按照其长度进行分组 dp = {} for word in words: pool[len(word)].append(word)", "i, j, length1, length2 = 0, 0, len(word1), len(word2) #", "length1 == 1 and i == length1: # # return", "for word in pool[key]: for j in range(key): tmp =", "动态规划 # # dp[i] = max(dp[i], dp[j] + 1) (0", "word2[j]: # # i += 1 # # j +=", "# # i += 1 # # j += 1", "False # for i in range(len(word2)): # if word2[: i]", "进行比对 # if len(word1) + 1 != len(word2): # return", "length = len(words) if length < 2: return length pool", "# 动态规划 # # dp[i] = max(dp[i], 
dp[j] + 1)", "- 1]: # 去重 # continue # for j in", "0, 0, len(word1), len(word2) # # while i < length1", "len(words) if length < 2: return length pool = collections.defaultdict(list)", "String Chain) # @Author: KivenC # @Date: 2019-05-26 20:35:25 #", "将字符串按照其长度进行分组 dp = {} for word in words: pool[len(word)].append(word) for", "words.sort(key=len) # 按字符串长度递增排序 # for i in range(1, length): #", "2: # return length # dp = [1 for _", "2 def longestStrChain(self, words: List[str]) -> int: import collections length", "# @Author: KivenC # @Date: 2019-05-26 20:35:25 # @Runtime: 144", "# return False # way 2 def longestStrChain(self, words: List[str])", "len(words[i]) == len(words[j]): # continue # if self.isPre(words[j], words[i]): #", "2019-05-26 20:35:25 # @Runtime: 144 ms # @Memory: 13.3 MB", "i == length1: # # return True # # return", "# # j += 1 # # if length2 -", "range(length)] # words.sort(key=len) # 按字符串长度递增排序 # for i in range(1,", "if key - 1 not in pool: continue for word", "in pool: continue for word in pool[key]: for j in", "1) (0 <= j < i 且 words[j] 是 words[i]", "1 # def longestStrChain(self, words: List[str]) -> int: # #", "str, word2: str) -> bool: # # 判断 word1 是否是", "+ 1:] == word1: # return True # return False", "max(dp.get(word, 1), dp.get(tmp, 1) + 1) return max(dp.values()) if dp", "# for i in range(len(word2)): # if word2[: i] +", "== words[i - 1]: # 去重 # continue # for", "# if length < 2: # return length # dp", "1: # 剪枝 # break # if len(words[i]) == len(words[j]):", "for i in range(1, length): # if i >= 1", "# dp[i] = max(dp[i], dp[j] + 1) (0 <= j", "1]: # 去重 # continue # for j in range(i", "1) # return max(dp) # def isPre(self, word1: str, word2:", "word1: str, word2: str) -> bool: # # 判断 word1", "dp.get(tmp, 1) + 1) return max(dp.values()) if dp else 1", "len(words[j]) > 1: # 剪枝 # break # if len(words[i])", "words: pool[len(word)].append(word) for key in sorted(pool.keys()): if key - 1", "-1, -1): # if len(words[i]) - len(words[j]) > 1: #", "length1: # # return True # # return False #", "words[i - 1]: # 去重 # continue # for j", "# dp[i] = max(dp[i], dp[j] + 1) # return max(dp)", "== word1: # return True # return False # way", "j in range(key): tmp = word[: j] + word[j +", "if len(word1) + 1 != len(word2): # return False #", "return False # way 2 def longestStrChain(self, words: List[str]) ->", "j += 1 # # if length2 - length1 ==", "# # if word1[i] == word2[j]: # # i +=", "@Date: 2019-05-26 20:35:25 # @Runtime: 144 ms # @Memory: 13.3", "- len(words[j]) > 1: # 剪枝 # break # if", "if i >= 1 and words[i] == words[i - 1]:", "Solution: # # way 1 # def longestStrChain(self, words: List[str])", "List[str]) -> int: # # 动态规划 # # dp[i] =", "int: # # 动态规划 # # dp[i] = max(dp[i], dp[j]", "20:35:25 # @Runtime: 144 ms # @Memory: 13.3 MB class", "# @Date: 2019-05-26 20:35:25 # @Runtime: 144 ms # @Memory:", "-> int: # # 动态规划 # # dp[i] = max(dp[i],", "tmp in pool[key - 1]: dp[word] = max(dp.get(word, 1), dp.get(tmp,", "# # 双指针 # # i, j, length1, length2 =", "continue # if self.isPre(words[j], words[i]): # dp[i] = max(dp[i], dp[j]", "return True # return False # way 2 def longestStrChain(self,", "MB class Solution: # # way 1 # def longestStrChain(self,", "# # i, j, length1, length2 = 0, 0, len(word1),", "双指针 # # i, j, length1, length2 = 0, 0,", "self.isPre(words[j], words[i]): # dp[i] = max(dp[i], dp[j] + 1) #" ]
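The overlapping fragments listed above come from a Python solution to LeetCode 1048 "最长字符串链" (Longest String Chain) credited to KivenC. For readability, the active approach they encode ("way 2": bucket words by length, then extend each word's chain from its length-minus-one predecessors) reassembles roughly as the sketch below; only the typing import is an assumption here, since it is not visible in the fragments, and the rest is pieced together from them.

from typing import List   # assumed here; not shown in the fragments
import collections

class Solution:
    def longestStrChain(self, words: List[str]) -> int:
        length = len(words)
        if length < 2:
            return length
        pool = collections.defaultdict(list)  # 将字符串按照其长度进行分组 (group words by length)
        dp = {}
        for word in words:
            pool[len(word)].append(word)
        for key in sorted(pool.keys()):
            if key - 1 not in pool:            # no candidate predecessors of length key-1
                continue
            for word in pool[key]:
                for j in range(key):
                    tmp = word[:j] + word[j + 1:]      # drop one character
                    if tmp in pool[key - 1]:           # tmp is a predecessor of word
                        dp[word] = max(dp.get(word, 1), dp.get(tmp, 1) + 1)
        return max(dp.values()) if dp else 1

Processing lengths in ascending order guarantees that a predecessor's chain value is already in dp before any of its successors is visited; for example, Solution().longestStrChain(["a", "b", "ba", "bca", "bda", "bdca"]) returns 4.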
[ "count + 1 cmds.intField(scaleField_UI, v=1+count, e=1) def addThreeUnit(*args): global count", "joint piv = cmds.xform(clavJoint, q=True, ws=True, t=True) cmds.xform(clavController, ws=True, piv=piv)", "# Create ordered hierarchy for x in reversed(range(chainLen)): if x", "0: cmds.error(\"pls, select one relation type\") break newJointName = ogChain[i]", "targetHandle=armikHandle[0]) cmds.addAttr(pvController, at=\"enum\", enumName = \"------\", ln=\"Attributes\", k=1, r=1) cmds.addAttr(pvController,", "+ \"_fk_anim_grp\", ogChain[x-1] + \"_fk_anim\") # Set orientConstraint _anim controllers", "== \"Leg\": if x == (chainLen-1): cmds.delete(ogChain[chainLen-1] + \"_fk_anim_grp\") def", "= createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=armikHandle[0]) cmds.addAttr(pvController, at=\"enum\", enumName = \"------\",", "ctrlGrp) cmds.parent(ogChain[0] + \"_fk_anim_grp\", ctrlGrp) cmds.parent(switcherLocGrp, rigGrp) def clavSel(scaleClav): #", "r = 1, s = 0) cmds.delete(fk_controller, ch = 1)", "+ coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".visibility\", k=0, l=1) #", "3, 4, 5, 6, 7, 8, 9, 1.0, 1.1, 1.2,", "min=0, max=1) cmds.addAttr(pvController, ln=\"Follow_Leg_Foot\", k=1, r=1, min=0, max=1, dv=0.5) #", "place ik controller ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0,", "output to the original joint chain cmds.connectAttr((ogChain[x] + \"_ik.rotate\"), blendColorsNode", "cmds.formLayout(mainLayout, e=1, attachForm = [ (chainMenu_UI, \"left\", 8), (chainMenu_UI, \"top\",", "transform, create offset group and color cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp)", "\"_fk_anim_grp\", ogChain[x-1] + \"_fk_anim\") # Set orientConstraint _anim controllers with", "r=1, min=0, max=1, dv=0.5) # Parent ikController and PV under", "under _rig_GRP cmds.parent(ikFootControlGrp, pvController + \"_grp\" ,rigGrp) # Set SDK", "= cmds.checkBox(clavCheckbox_UI, q=1, v=0) if side == \"l_\": controllerColor =", "+ \"_grp\") cmds.parent(ikFootControl, ikFootControlGrp) # Set size, freeze transform, create", "else: #print (\"scaleController\", scaleField_UI) legIK(scaleIK, masterIkHandle, HandleName) def armIk(armIkScale, armikHandle,", "ogChain[x] + \"_ikW0\" fkSdkDriven = ogChain[x] + \"_parentConstraint1.\" + ogChain[x]", "+ \"_ik\", ee=ogChain[4] + \"_ik\", sol=\"ikSCsolver\", n=side + \"toe_ikHandle\") #", "dv=0) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1,", "chainMenu_UI = cmds.optionMenu(\"chainMenu_UI\", l=\"Which chain?\", cc=visCheck) cmds.menuItem(l=\"Leg\") cmds.menuItem(l=\"Arm\") constraintCheckBox_UI =", "on ikController cmds.addAttr(ikFootControl, at=\"enum\",enumName = \"------\", ln=\"Attributes\", k=1, r=1) cmds.addAttr(ikFootControl,", "= cmds.text(l=\"Controllers size\") scaleField_UI = cmds.intField(en=10, v=1, min=1) plusOne_UI =", "orientController = cmds.optionMenu(\"UI_orientControllerMenu\", q=1, v=1) # Create controllers and group", "at=\"short\", min=0, max=1, k=1, r=1) cmds.move(0,0,-12, switcherLocGrp, r=1) #IMPROVE THIS", "k=1, r=1) cmds.move(0,0,-12, switcherLocGrp, r=1) #IMPROVE THIS SHIT cmds.parentConstraint(ogChain[1], switcherLocGrp,", "duplicateChain(*args): global ogChain global chainLen global switcherLoc global side global", "start = cmds.xform(ogChain[0], q=1, ws=1, t=1) mid = cmds.xform(ogChain[1], q=1,", "# Rename shape node shapeList = cmds.listRelatives(crvIkCube, s = True)", "k=0, l=1) 
cmds.setAttr(switcherLoc[0] + \".rotate\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0]", "ws=1, r=1) cmds.color(clavController, rgb=controllerColor) # Move pivots on clavicle joint", "\"_grp.visibility\", cd=sdkDriver, v=0, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver,", "\".output\"), (ogChain[x] + \".rotate\" )) cmds.connectAttr(switcherLoc[0]+\".FKIK_Mode\", blendColorsNode + \".blender\") ikChainBuild(scaleController,", "cmds.parent((ogChain[0] + \"_ik\"), world = True) cmds.setAttr(ogChain[0] + \"_ik.visibility\", 0)", "(-0.784, 0,-2.5), (0, 0,-3), (0.784, 0, -2.5), (1.108, 0, 0),", "mo=1) #remove .t, .r, .s and .v from the channelbox", "\"_ik\", ikFootControlGrp)) cmds.color(ikFootControl, rgb=controllerColor) # pivot snapping on ankle joint", "for now it's ok chainLen = 3 #suffix for the", "side == \"r_\": controllerColor = rgb=(255, 0, 0) if chainMenu", "q=1, ws=1, t=1) startV = om.MVector(start[0], start[1], start[2]) midV =", "and put it at the top of the chain clavJoint", "\"top\", 5, constraintCheckBox_UI), (scaleField_UI, \"top\", 5, separator01), (scaleControllerText, \"top\", 8,", "r=1) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1, min=0, max=1) cmds.addAttr(pvController, ln=\"Follow_Clav_Hand\", k=1,", "rigGrp) # Execute if blendCheckbox == 1: blendNodeFunc(scaleController, chainMenu) if", "Execute if blendCheckbox == 1: blendNodeFunc(scaleController, chainMenu) if constraintCheckBox ==", "\"hand_ikHandle\", sol=\"ikSCsolver\") cmds.parent(handikHandle[0], armikHandle[0]) #create IK controller ---> CUBE crvIkCube", "Create some blendColors node with the same name of the", "r=1) cmds.move(0,0,-12, switcherLocGrp, r=1) #IMPROVE THIS SHIT cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1)", "fk_controller = cmds.circle(n=ogChain[y] + \"_fk_anim\")[0] # If not [0] it'll", "into blendColors channels and then connect the output to the", "blendCheckbox_UI global plusOne_UI global plusThree_UI global clavCheckbox_UI if cmds.window(\"switchModeUI\", ex", "(orientControllerMenu, \"left\", 8), (orientControllerMenu, \"top\", 5), #-------------------- (execButton, \"bottom\", 5),", "and +3 count = 0 def addOneUnit(*args): global count count", "at=\"enum\",enumName = \"------\", ln=\"Attributes\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Twist\", k=1, r=1)", "it at the top of the chain clavJoint = cmds.pickWalk(ogChain[0],", "count = count + 3 cmds.intField(scaleField_UI, v=1+count, e=1) def blendNodeFunc(scaleController,", "clavSel(scaleClav): # Select clavicle Joint moving up and put it", "ogChain[x] + \"_parentConstraint1.\" + ogChain[x] + \"_fkW1\" # Setup SDK", "their position and freeze transform cmds.joint(n = newJointName) cmds.matchTransform(newJointName, ogChain[i])", "v=0, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=1, dv=1)", "in range(chainLen): blendColorsNode = cmds.createNode(\"blendColors\", n = ogChain[x] + \"_blend\")", "(-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5), (0, 0,-3), (0.784, 0,", "is totally unscalable but for now it's ok chainLen =", "= False cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd) # Buttons +1 and", "cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 0)", "for bone in [\"Ankle\", \"Ball\", \"Toe_Tap\"]: cmds.addAttr(ikFootControl, at=\"enum\", enumName =", "(0, 0, 2.39)], k=[0,1,2,3,4,5,6,7,8,9,10], n=side + 
\"leg_anim_ik\") # Rename shape", "# Set controller orientation based on second axis if orientController", "[\"X\", \"Y\", \"Z\"]: cmds.addAttr(ikFootControl, ln=bone+coord, k=1, r=1) # Parent ikController", "separator02), ], attachPosition = [#(clavCheckbox_UI, \"right\", 0, 10), (constraintCheckBox_UI, \"left\",", "\"_scale\"] for newJoint in newJointList: for i in range(chainLen): if", "Constraint cmds.delete(cmds.pointConstraint(loc, locGrp)) cmds.parent(loc, locGrp) cmds.makeIdentity(loc, a=1, t=1, r=1, s=1)", "for x in range(chainLen): # Setup orient constraints cmds.parentConstraint((ogChain[x] +", "# Parent ikController and PV under _rig_GRP cmds.parent(crvIkCubeGrp, pvController +", "#---------- Making Pole Vector -------------# # Pole Vector controller --->", "scaleField_UI) armIk(scaleIK, masterIkHandle, HandleName) else: #print (\"scaleController\", scaleField_UI) legIK(scaleIK, masterIkHandle,", "/ float(startEnd.length()) startEndN = startEnd.normal() projV = startEndN * proj", "ikFootControl + \"Shape\") ikFootControlGrp = cmds.group(em=1, n=ikFootControl + \"_grp\") cmds.parent(ikFootControl,", "q=1, v=1) clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0) if side ==", "(-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5,", "- startV dotP = startMid * startEnd proj = float(dotP)", "= cmds.xform(ogChain[1], q=1, ws=1, t=1) end = cmds.xform(ogChain[2], q=1, ws=1,", "\"_fkW1\" # Setup SDK cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0)", "the same name of the joint for x in range(chainLen):", "= om.MVector(mid[0], mid[1], mid[2]) endV = om.MVector(end[0], end[1], end[2]) startEnd", "n=side + \"toe_ikHandle\") # Create and place ik controller ikFootControl", "Joint moving up and put it at the top of", "snap it between two joints switcherLoc = cmds.spaceLocator(n=side + chainMenu", "newJointName = ogChain[i] + newJoint #create a joint, copy their", "clavJoint) # Parent ik and fk chain under clavicle controller", "cmds.rotate(0,90,0, fk_controller) if orientController == \"z\": cmds.rotate(0,0,90, fk_controller) # Freeze", "and color cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp) cmds.move(0,-3.2,0, ikFootControl, r=1) cmds.makeIdentity(ikFootControl,", "global blendCheckbox_UI global plusOne_UI global plusThree_UI global clavCheckbox_UI if cmds.window(\"switchModeUI\",", "newJoint in newJointList: for i in range(chainLen): if blendCheckbox ==", "global clavCheckbox global rigGrp, ctrlGrp ogRootchain = cmds.ls(sl = True,", "v=1) blendCheckbox = cmds.checkBox(blendCheckbox_UI, q=1, v=1) constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1,", "of the joint for x in range(chainLen): blendColorsNode = cmds.createNode(\"blendColors\",", "constraintFunc)) cmds.formLayout(mainLayout, e=1, attachForm = [ (chainMenu_UI, \"left\", 8), (chainMenu_UI,", "else: #this is totally unscalable but for now it's ok", "\"left\", 8), (orientControllerMenu, \"top\", 5), #-------------------- (execButton, \"bottom\", 5), (execButton,", "in range(chainLen): anim_group = cmds.group(em=1, n=ogChain[y] + \"_fk_anim_grp\") fk_controller =", "(plusOne_UI, \"right\", 0, 45), (plusThree_UI, \"right\", 0, 49) ] )", "snapping on ankle joint piv = cmds.xform(ogChain[2], q=True, ws=True, t=True)", "t=True) cmds.xform(ikFootControl, ws=True, piv=piv) cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl) #---------- Making", ".v from the channelbox for coord in [\"X\", \"Y\", \"Z\"]:", "chainMenu) if clavCheckbox 
== 1: clavSel(scaleController) else: cmds.parent(ogChain[0] + \"_ik\",", "0, 10), (constraintCheckBox_UI, \"left\", 0, 26), (blendCheckbox_UI, \"right\", 10, 24),", "cmds import maya.OpenMaya as om from functools import partial def", "sol=\"ikSCsolver\", n=side + \"toe_ikHandle\") # Create and place ik controller", "+ \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=0, dv=0)", "blendNodeFunc(scaleController, selectChain): # Create some blendColors node with the same", "\"right\", 0, 45), (plusThree_UI, \"right\", 0, 49) ] ) cmds.showWindow(myWin)", "r=1, min=0, max=1, dv=0.5) # Create attributes on ikController cmds.addAttr(ikFootControl,", "5), (execButton, \"left\", 5), (execButton, \"right\", 5), ], attachControl =", "cmds.intField(scaleField_UI, v=1+count, e=1) def blendNodeFunc(scaleController, selectChain): # Create some blendColors", "cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller)) cmds.parent(fk_controller, anim_group) # Set controller orientation based on", "+ \"_ik\", crvIkCubeGrp)) cmds.color(crvIkCube, rgb=controllerColor) cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp) cmds.parent(armikHandle[0],", "cmds.xform(clavController, ws=True, piv=piv) cmds.xform(clavControllerGrp, ws=True, piv=piv) cmds.orientConstraint(clavController, clavJoint) # Parent", "\"left\", 5), (separator02, \"left\", 1), (separator02, \"right\", 2), #-------------------- (orientControllerMenu,", "5), (chainMenu_UI, \"right\", 80), (clavCheckbox_UI, \"top\", 7), (blendCheckbox_UI, \"left\", 5),", "asd = True if vis == \"Leg\": asd = False", "ln=bone+coord, k=1, r=1) # Parent ikController and PV under _rig_GRP", "and then make Constraint cmds.delete(cmds.pointConstraint(loc, locGrp)) cmds.parent(loc, locGrp) cmds.makeIdentity(loc, a=1,", "legikHandle[0], ikFootControl) #---------- Making Pole Vector -------------# # Pole Vector", "ikHandJoint, r=1, os=1) cmds.parent(ikHandJoint, ogChain[2] + \"_ik\") handikHandle = cmds.ikHandle(sj=ogChain[2]", "joint finalV = arrowV + midV cmds.xform(loc, ws=1, t=(finalV.x, finalV.y", "cmds.checkBox(constraintCheckBox_UI, q=1, v=1) chainMenu = cmds.optionMenu(\"chainMenu_UI\", q=1, v=1) clavCheckbox =", "+ \"_grp\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", crvIkCubeGrp)) cmds.color(crvIkCube, rgb=controllerColor) cmds.scale(armIkScale, armIkScale,", "\"Shape\") crvIkCubeGrp = cmds.group(n=crvIkCube + \"_grp\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", crvIkCubeGrp))", "chainMenu = cmds.optionMenu(\"chainMenu_UI\", q=1, v=1) clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0)", "blendColors channels and then connect the output to the original", "0, 26), (blendCheckbox_UI, \"right\", 10, 24), (scaleControllerText, \"left\", 5, 0),", "ws=True, piv=piv) cmds.xform(clavControllerGrp, ws=True, piv=piv) cmds.orientConstraint(clavController, clavJoint) # Parent ik", "\"_ikfk_Switch\") switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] + \"_grp\") cmds.color(switcherLoc, rgb=(255, 255,", "2, 3, 4, 5, 6, 7, 8, 9, 1.0, 1.1,", "pvController = createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=armikHandle[0]) cmds.addAttr(pvController, at=\"enum\", enumName =", "a=1, t=1, r=1, s=1) cmds.color(loc, rgb=controllerColor) cmds.poleVectorConstraint(loc, targetHandle) def showUI():", "cmds.xform(ogChain[2], q=1, ws=1, t=1) startV = om.MVector(start[0], start[1], start[2]) midV", "5, 0), (scaleField_UI, \"left\", 110, 0), 
#(scaleField_UI, \"right\",0, 40), (plusOne_UI,", "= True, type = \"joint\") ogChain.append(ogRootchain) ogChain.reverse() side = ogRootchain[0:2]", "separator01), (separator02, \"top\", 6, scaleField_UI), (orientControllerMenu, \"top\", 6, separator02), ],", "cmds.setAttr(switcherLoc[0] + \".visibility\", k=0, l=1) # Create hierarchy groups rigGrp", "cmds.parent((ogChain[0]+\"_fk_anim_grp\"),(ogChain[0] + \"_ik\"), (ogChain[0] + \"_fk\"), clavController) cmds.parent(clavControllerGrp, ctrlGrp) def", "0) if side == \"l_\": cmds.move(10,0,0, ikHandJoint, r=1, os=1) else:", "cmds.delete(ikFootControl, ch = 1) cmds.delete(cmds.pointConstraint(ogChain[3] + \"_ik\", ikFootControlGrp)) cmds.color(ikFootControl, rgb=controllerColor)", "and PV under _rig_GRP cmds.parent(ikFootControlGrp, pvController + \"_grp\" ,rigGrp) #", "[\"X\", \"Y\", \"Z\"]: #cmds.setAttr(fk_controller + \".translate\" + x, k=0, l=1)", "armikHandle[0]) #create IK controller ---> CUBE crvIkCube = cmds.curve(d=1, p=[(-0.5,", "cmds.parent(ikFootControlGrp, pvController + \"_grp\" ,rigGrp) # Set SDK visibility sdkDriver", "= endV - startV startMid = midV - startV dotP", "HandleName) else: #print (\"scaleController\", scaleField_UI) legIK(scaleIK, masterIkHandle, HandleName) def armIk(armIkScale,", "v=0, dv=0) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=0, dv=0) cmds.setAttr(sdkDriver, 1)", "cmds.poleVectorConstraint(loc, targetHandle) def showUI(): global chainMenu_UI global scaleField_UI global orientControllerMenu", "= cmds.checkBox(blendCheckbox_UI, q=1, v=1) constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1) chainMenu", "attachForm = [ (chainMenu_UI, \"left\", 8), (chainMenu_UI, \"top\", 5), (chainMenu_UI,", "+ 1 cmds.intField(scaleField_UI, v=1+count, e=1) def addThreeUnit(*args): global count count", "+ \"_ikHandle\") cmds.setAttr(masterIkHandle[0] + \".visibility\", 0) if HandleName == \"Arm\":", "ankle joint piv = cmds.xform(ogChain[2], q=True, ws=True, t=True) cmds.xform(ikFootControl, ws=True,", "startEnd proj = float(dotP) / float(startEnd.length()) startEndN = startEnd.normal() projV", "(scaleControllerText, \"left\", 5, 0), (scaleField_UI, \"left\", 110, 0), #(scaleField_UI, \"right\",0,", "\".translate\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".rotate\" + coord,", "HandleName) def armIk(armIkScale, armikHandle, pvName): ikHandJoint = cmds.joint(n=side + \"hand_ik\")", "clavController) cmds.makeIdentity(clavController, a=1) cmds.move(0,10,0, clavControllerGrp, ws=1, r=1) cmds.color(clavController, rgb=controllerColor) #", "v=0, dv=0) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver,", "0.5, -0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5,", "# Lock .t and .s attributes #for x in [\"X\",", "+ x, k=0, l=1) # Create ordered hierarchy for x", "cmds.deleteUI(\"switchModeUI\") myWin = cmds.window(\"switchModeUI\", t=\"IKFK Builder\", w=300, h=300, s=1) mainLayout", "+ \"_fkW1\" # Setup SDK cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0,", "ctrlGrp ogRootchain = cmds.ls(sl = True, type = \"joint\")[0] ogChain", "ctrlUI_lib import createClav2, createSphere import maya.cmds as cmds import maya.OpenMaya", "\"_ik\"), world = True) cmds.setAttr(ogChain[0] + \"_ik.visibility\", 0) cmds.setAttr(ogChain[0] +", "dv=1) def legIK(ikFootScale, legikHandle, pvName): ballikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\",", "startMid = midV - startV dotP 
= startMid * startEnd", "cmds.delete(cmds.pointConstraint(ogChain[1], ogChain[2], switcherLocGrp)) cmds.addAttr(switcherLoc, ln=\"FKIK_Mode\", at=\"short\", min=0, max=1, k=1, r=1)", "1.789), (0, 0, 2.39), (-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5),", "+ \"_ik\", ee=ogChain[3] + \"_ik\", sol=\"ikSCsolver\", n=side + \"ball_ikHandle\") toeikHandle", "5), (separator02, \"left\", 1), (separator02, \"right\", 2), #-------------------- (orientControllerMenu, \"left\",", "v=1, dv=1) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=1, dv=1) def findPoleVector(loc,", "cmds.delete(cmds.pointConstraint(loc, locGrp)) cmds.parent(loc, locGrp) cmds.makeIdentity(loc, a=1, t=1, r=1, s=1) cmds.color(loc,", "ws=True, t=True) cmds.xform(clavController, ws=True, piv=piv) cmds.xform(clavControllerGrp, ws=True, piv=piv) cmds.orientConstraint(clavController, clavJoint)", "(execButton, \"right\", 5), ], attachControl = [(clavCheckbox_UI, \"left\", 10, chainMenu_UI),", "def armIk(armIkScale, armikHandle, pvName): ikHandJoint = cmds.joint(n=side + \"hand_ik\") cmds.delete(cmds.parentConstraint(ogChain[2]", "ikController and PV under _rig_GRP cmds.parent(crvIkCubeGrp, pvController + \"_grp\" ,rigGrp)", "start[2]) midV = om.MVector(mid[0], mid[1], mid[2]) endV = om.MVector(end[0], end[1],", "(0.5, -0.5, 0.5), (0.5, -0.5, -0.5), (-0.5, -0.5, -0.5)], k=[0,", "cmds.connectAttr((blendColorsNode + \".output\"), (ogChain[x] + \".rotate\" )) cmds.connectAttr(switcherLoc[0]+\".FKIK_Mode\", blendColorsNode +", "= cmds.xform(ogChain[0], q=1, ws=1, t=1) mid = cmds.xform(ogChain[1], q=1, ws=1,", "float(dotP) / float(startEnd.length()) startEndN = startEnd.normal() projV = startEndN *", "armIk(scaleIK, masterIkHandle, HandleName) else: #print (\"scaleController\", scaleField_UI) legIK(scaleIK, masterIkHandle, HandleName)", "createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=armikHandle[0]) cmds.addAttr(pvController, at=\"enum\", enumName = \"------\", ln=\"Attributes\",", "cmds.color(loc, rgb=controllerColor) cmds.poleVectorConstraint(loc, targetHandle) def showUI(): global chainMenu_UI global scaleField_UI", "\"_grp.visibility\", cd=sdkDriver, v=0, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver,", "global plusThree_UI global clavCheckbox_UI if cmds.window(\"switchModeUI\", ex = 1): cmds.deleteUI(\"switchModeUI\")", "def addThreeUnit(*args): global count count = count + 3 cmds.intField(scaleField_UI,", "connect the output to the original joint chain cmds.connectAttr((ogChain[x] +", "select one relation type\") break newJointName = ogChain[i] + newJoint", "+ \"_ik\"), world = True) cmds.setAttr(ogChain[0] + \"_ik.visibility\", 0) cmds.setAttr(ogChain[0]", "#this is totally unscalable but for now it's ok chainLen", "= rgb=(0, 0, 255) elif side == \"r_\": controllerColor =", "import partial def duplicateChain(*args): global ogChain global chainLen global switcherLoc", "a = 1, t = 1, r = 1, s", "== 1: constraintFunc(scaleController, chainMenu) if clavCheckbox == 1: clavSel(scaleController) else:", "global count count = count + 1 cmds.intField(scaleField_UI, v=1+count, e=1)", "#print (\"scaleController\", scaleField_UI) armIk(scaleIK, masterIkHandle, HandleName) else: #print (\"scaleController\", scaleField_UI)", "create offset group and color cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp) cmds.move(0,-3.2,0,", "= createClav2(clavJoint + \"_anim\") # Import coordinates from ctrlUI_lib 
cmds.delete(cmds.pointConstraint(clavJoint,", "0, 255) elif side == \"r_\": controllerColor = rgb=(255, 0,", "+ newJoint #create a joint, copy their position and freeze", "= cmds.intField(en=10, v=1, min=1) plusOne_UI = cmds.button(l=\"+1\", c=addOneUnit) plusThree_UI =", "cd=sdkDriver, v=0, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=1,", "clavCheckbox_UI if cmds.window(\"switchModeUI\", ex = 1): cmds.deleteUI(\"switchModeUI\") myWin = cmds.window(\"switchModeUI\",", "+ \"_grp\", em=1) cmds.delete(cmds.parentConstraint(clavJoint, clavControllerGrp)) cmds.parent(clavController, clavControllerGrp) fixedScale = scaleClav/4", "anim_group = cmds.group(em=1, n=ogChain[y] + \"_fk_anim_grp\") fk_controller = cmds.circle(n=ogChain[y] +", "+ HandleName + \"_ikHandle\") cmds.setAttr(masterIkHandle[0] + \".visibility\", 0) if HandleName", "-0.5, -0.5), (-0.5, -0.5, -0.5)], k=[0, 1, 2, 3, 4,", "cmds.parent(ogChain[x] + \"_fk_anim_grp\", ogChain[x-1] + \"_fk_anim\") # Set orientConstraint _anim", "and set color cmds.makeIdentity(fk_controller, a = 1, t = 1,", "(separator02, \"left\", 1), (separator02, \"right\", 2), #-------------------- (orientControllerMenu, \"left\", 8),", "chainMenu + \"_ikfk_Switch\") switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] + \"_grp\") cmds.color(switcherLoc,", "\"right\", 2), #-------------------- (scaleField_UI, \"right\", 65), (scaleField_UI, \"left\", 5), (plusOne_UI,", "cmds.spaceLocator(n=side + chainMenu + \"_ikfk_Switch\") switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] +", "cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0, 2.39), (-0.997,0,1.789), (-1.108, 0,", "= 1, t = 1, r = 1, s =", "= switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver,", "= cmds.listRelatives(ikFootControl, s = True) cmds.rename(shapeList, ikFootControl + \"Shape\") ikFootControlGrp", "-0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5), (0.5, -0.5,", "6, separator02), ], attachPosition = [#(clavCheckbox_UI, \"right\", 0, 10), (constraintCheckBox_UI,", "cmds.parent(ogChain[0] + \"_ik\", ogChain[0] + \"_fk\", ctrlGrp) cmds.parent(ogChain[0] + \"_fk_anim_grp\",", "x in reversed(range(chainLen)): if x == 0: continue cmds.parent(ogChain[x] +", "(blendCheckbox_UI, \"right\", 10, 24), (scaleControllerText, \"left\", 5, 0), (scaleField_UI, \"left\",", "cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ogChain[0] +", "blendCheckbox == 1: blendNodeFunc(scaleController, chainMenu) if constraintCheckBox == 1: constraintFunc(scaleController,", "n=ikFootControl + \"_grp\") cmds.parent(ikFootControl, ikFootControlGrp) # Set size, freeze transform,", "0, 2.39), (-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5), (0, 0,-3),", "0), (-0.784, 0,-2.5), (0, 0,-3), (0.784, 0, -2.5), (1.108, 0,", "clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0) if side == \"l_\": controllerColor", "for coord in [\"X\", \"Y\", \"Z\"]: cmds.addAttr(ikFootControl, ln=bone+coord, k=1, r=1)", "\"top\", 5, separator01), (scaleControllerText, \"top\", 8, separator01), (plusOne_UI, \"top\", 4,", "k=0, l=1) cmds.setAttr(switcherLoc[0] + \".visibility\", k=0, l=1) # Create hierarchy", "= [#(clavCheckbox_UI, \"right\", 0, 10), (constraintCheckBox_UI, \"left\", 0, 26), (blendCheckbox_UI,", "cmds.menuItem(l=\"y\") cmds.menuItem(l=\"z\") # Scale the UI becase you'll never know", "the joint for x in 
range(chainLen): # Setup orient constraints", "selectChain) def fkControllerCreator(fkSize, legOrArm): orientController = cmds.optionMenu(\"UI_orientControllerMenu\", q=1, v=1) #", "cmds.menuItem(l=\"Arm\") constraintCheckBox_UI = cmds.checkBox(label = \"orientConsts+SDK Mode\", v=0, cc= lambda", "cmds.addAttr(ikFootControl, ln=bone+coord, k=1, r=1) # Parent ikController and PV under", "(plusOne_UI, \"right\", 5), (plusThree_UI, \"right\", 5), (scaleControllerText, \"left\", 5), (separator02,", "controller ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0, 2.39),", "selecting which chain: Leg or Arm? chainMenu_UI = cmds.optionMenu(\"chainMenu_UI\", l=\"Which", "\"top\", 4, separator01), (separator02, \"top\", 6, scaleField_UI), (orientControllerMenu, \"top\", 6,", "== \"l_\": controllerColor = rgb=(0, 0, 255) elif side ==", "naming convention sdkDriver = switcherLoc[0] + \".FKIK_Mode\" ikSdkDriven = ogChain[x]", "and fk chain under clavicle controller cmds.parent((ogChain[0]+\"_fk_anim_grp\"),(ogChain[0] + \"_ik\"), (ogChain[0]", "0,-3), (0.784, 0, -2.5), (1.108, 0, 0), (0.997, 0, 1.789),", "ogChain.append(ogRootchain) ogChain.reverse() side = ogRootchain[0:2] # Initialize input from UI", "Create hierarchy groups rigGrp = cmds.group(em=1, n= side + chainMenu", "scaleField_UI global orientControllerMenu global constraintCheckBox_UI global blendCheckbox_UI global plusOne_UI global", "totally unscalable but for now it's ok chainLen = 3", "\"_ik\", ee=ogChain[4] + \"_ik\", sol=\"ikSCsolver\", n=side + \"toe_ikHandle\") # Create", "x in range(chainLen): blendColorsNode = cmds.createNode(\"blendColors\", n = ogChain[x] +", "on ankle joint piv = cmds.xform(ogChain[2], q=True, ws=True, t=True) cmds.xform(ikFootControl,", "#deselect to make the two different hierarchies cmds.select(cl = 1)", "= 1, s = 0) if side == \"l_\": cmds.move(10,0,0,", "two different hierarchies cmds.select(cl = 1) cmds.parent((ogChain[0] + \"_ik\"), world", "ln=\"Follow_Clav_Hand\", k=1, r=1, min=0, max=1, dv=0.5) # Parent ikController and", "\"_grp\", em=1) cmds.delete(cmds.parentConstraint(clavJoint, clavControllerGrp)) cmds.parent(clavController, clavControllerGrp) fixedScale = scaleClav/4 cmds.scale(fixedScale,", "+ \"_fk\") # If leg chain is selected delete toe", "ogChain global chainLen global switcherLoc global side global controllerColor global", ".s attributes #for x in [\"X\", \"Y\", \"Z\"]: #cmds.setAttr(fk_controller +", "float(startEnd.length()) startEndN = startEnd.normal() projV = startEndN * proj arrowV", "startMid - projV arrowV*= 10 #distance from joint finalV =", "\"_ik\", sol=\"ikRPsolver\", n=side + HandleName + \"_ikHandle\") cmds.setAttr(masterIkHandle[0] + \".visibility\",", "cmds.checkBox(clavCheckbox_UI, q=1, v=0) if side == \"l_\": controllerColor = rgb=(0,", "count + 3 cmds.intField(scaleField_UI, v=1+count, e=1) def blendNodeFunc(scaleController, selectChain): #", "== 1: clavSel(scaleController) else: cmds.parent(ogChain[0] + \"_ik\", ogChain[0] + \"_fk\",", "\"left\", 10, chainMenu_UI), (constraintCheckBox_UI, \"top\", 5, chainMenu_UI), (blendCheckbox_UI, \"top\", 5,", "projV = startEndN * proj arrowV = startMid - projV", "on clavicle joint piv = cmds.xform(clavJoint, q=True, ws=True, t=True) cmds.xform(clavController,", "cmds.optionMenu(\"chainMenu_UI\", q=1, v=1) clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0) if side", "global side global controllerColor global clavCheckbox global rigGrp, ctrlGrp ogRootchain", "= startEndN * proj arrowV = startMid - projV arrowV*=", 
"rgb=controllerColor) cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp) cmds.parent(armikHandle[0], crvIkCube) pvController = createSphere(nome=", "in range(chainLen): cmds.parentConstraint(ogChain[x] + \"_fk_anim\", ogChain[x] + \"_fk\") # If", "= cmds.spaceLocator(n=side + chainMenu + \"_ikfk_Switch\") switcherLocGrp = cmds.group(em=1, n=switcherLoc[0]", "= \"joint\") ogChain.append(ogRootchain) ogChain.reverse() side = ogRootchain[0:2] # Initialize input", "dv=0) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=0, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(crvIkCubeGrp", "ogChain[x]) cmds.parentConstraint((ogChain[x] + \"_fk\"), ogChain[x]) # Setup SDK naming convention", "history and set color cmds.makeIdentity(fk_controller, a = 1, t =", "= \"blendColor Mode\", v=0, cc= lambda state: (cmds.checkBox(constraintCheckBox_UI, e=1, en=state-1)))", "the user wishes. Maybe this can be improved orientControllerMenu =", "0.5), (-0.5, -0.5, -0.5), (-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5),", "fkSize, fk_controller) cmds.matchTransform(anim_group, ogChain[y]) cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller)) cmds.parent(fk_controller, anim_group) # Set", "side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=legikHandle[0]) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1, min=0, max=1) cmds.addAttr(pvController,", "5), #-------------------- (execButton, \"bottom\", 5), (execButton, \"left\", 5), (execButton, \"right\",", "cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver, v=0,", "max=1, k=1, r=1) cmds.move(0,0,-12, switcherLocGrp, r=1) #IMPROVE THIS SHIT cmds.parentConstraint(ogChain[1],", "cmds.setAttr(masterIkHandle[0] + \".visibility\", 0) if HandleName == \"Arm\": #print (\"scaleController\",", "#suffix for the new chains newJointList = [\"_ik\", \"_fk\", \"_scale\"]", "min=1) plusOne_UI = cmds.button(l=\"+1\", c=addOneUnit) plusThree_UI = cmds.button(l=\"+3\", c=addThreeUnit) separator01", "1, t = 1, r = 1, s = 1)", "attachPosition = [#(clavCheckbox_UI, \"right\", 0, 10), (constraintCheckBox_UI, \"left\", 0, 26),", "or Arm? 
chainMenu_UI = cmds.optionMenu(\"chainMenu_UI\", l=\"Which chain?\", cc=visCheck) cmds.menuItem(l=\"Leg\") cmds.menuItem(l=\"Arm\")", "ch = 1) cmds.delete(cmds.pointConstraint(ogChain[3] + \"_ik\", ikFootControlGrp)) cmds.color(ikFootControl, rgb=controllerColor) #", "cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(fkSdkDriven,", "l=1) cmds.setAttr(switcherLoc[0] + \".visibility\", k=0, l=1) # Create hierarchy groups", "0) if HandleName == \"Arm\": #print (\"scaleController\", scaleField_UI) armIk(scaleIK, masterIkHandle,", "max=1, dv=0.5) # Parent ikController and PV under _rig_GRP cmds.parent(crvIkCubeGrp,", "+ \"_ik\", sol=\"ikRPsolver\", n=side + HandleName + \"_ikHandle\") cmds.setAttr(masterIkHandle[0] +", "---> CUBE crvIkCube = cmds.curve(d=1, p=[(-0.5, 0.5, -0.5), (0.5, 0.5,", "rigGrp, ctrlGrp ogRootchain = cmds.ls(sl = True, type = \"joint\")[0]", "1, s = 0) #deselect to make the two different", "\"Leg\": asd = False cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd) # Buttons", "r = 1, s = 1) cmds.delete(ikFootControl, ch = 1)", "= cmds.listRelatives(ogRootchain, ad = True, type = \"joint\") ogChain.append(ogRootchain) ogChain.reverse()", "v=0, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=1, dv=1)", "= 0 def addOneUnit(*args): global count count = count +", "cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ogChain[3] + \"_ik\", sol=\"ikSCsolver\", n=side + \"ball_ikHandle\")", "transform, delete history and set color cmds.makeIdentity(fk_controller, a = 1,", "Buttons +1 and +3 count = 0 def addOneUnit(*args): global", "\"_ik\"), ogChain[x]) cmds.parentConstraint((ogChain[x] + \"_fk\"), ogChain[x]) # Setup SDK naming", "Create offset group, FDH and move up clavControllerGrp = cmds.group(n=clavController", "p=[(-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5), (-0.5,", "n=side + HandleName + \"_ikHandle\") cmds.setAttr(masterIkHandle[0] + \".visibility\", 0) if", "+ \".scale\" + x, k=0, l=1) # Create ordered hierarchy", "cmds.delete(cmds.pointConstraint(clavJoint, clavController)) # Create offset group, FDH and move up", "channels and then connect the output to the original joint", "\"_ik\", ikHandJoint)) cmds.makeIdentity(ikHandJoint, a = 1, t = 1, r", "# Set orientConstraint _anim controllers with _fk hierarchy for x", "-0.5, -0.5), (0.5, -0.5, 0.5), (0.5, -0.5, -0.5), (-0.5, -0.5,", "cmds.parent(loc, locGrp) cmds.makeIdentity(loc, a=1, t=1, r=1, s=1) cmds.color(loc, rgb=controllerColor) cmds.poleVectorConstraint(loc,", "cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp)) cmds.parent(switcherLoc, switcherLocGrp) cmds.delete(cmds.pointConstraint(ogChain[1], ogChain[2], switcherLocGrp)) cmds.addAttr(switcherLoc, ln=\"FKIK_Mode\", at=\"short\",", "range(chainLen): anim_group = cmds.group(em=1, n=ogChain[y] + \"_fk_anim_grp\") fk_controller = cmds.circle(n=ogChain[y]", "cmds.makeIdentity(loc, a=1, t=1, r=1, s=1) cmds.color(loc, rgb=controllerColor) cmds.poleVectorConstraint(loc, targetHandle) def", "+ \"_anim\") # Import coordinates from ctrlUI_lib cmds.delete(cmds.pointConstraint(clavJoint, clavController)) #", "if vis == \"Leg\": asd = False cmds.checkBox(clavCheckbox_UI, e=1, vis=asd,", "= startMid - projV arrowV*= 10 #distance from joint finalV", "midV - startV dotP = startMid * startEnd proj =", "chain cmds.connectAttr((ogChain[x] + \"_ik.rotate\"), blendColorsNode + \".color1\") cmds.connectAttr((ogChain[x] + 
\"_fk.rotate\"),", "axis if orientController == \"x\": cmds.rotate(90,0,0, fk_controller) if orientController ==", "= \"orientConsts+SDK Mode\", v=0, cc= lambda state: (cmds.checkBox(blendCheckbox_UI, e=1, en=state-1)))", "\"_grp\") cmds.color(switcherLoc, rgb=(255, 255, 0)) #yellow cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp)) cmds.parent(switcherLoc, switcherLocGrp)", "findPoleVector(loc=pvController, targetHandle=armikHandle[0]) cmds.addAttr(pvController, at=\"enum\", enumName = \"------\", ln=\"Attributes\", k=1, r=1)", "q=True, ws=True, t=True) cmds.xform(ikFootControl, ws=True, piv=piv) cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl)", "group, FDH and move up clavControllerGrp = cmds.group(n=clavController + \"_grp\",", "# Setup SDK cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(fkSdkDriven,", "cmds.addAttr(pvController, ln=\"Follow_Leg_Foot\", k=1, r=1, min=0, max=1, dv=0.5) # Create attributes", "(execButton, \"bottom\", 5), (execButton, \"left\", 5), (execButton, \"right\", 5), ],", "(orientControllerMenu, \"top\", 6, separator02), ], attachPosition = [#(clavCheckbox_UI, \"right\", 0,", "wishes. Maybe this can be improved orientControllerMenu = cmds.optionMenu(\"UI_orientControllerMenu\", l=\"What's", "new chains newJointList = [\"_ik\", \"_fk\", \"_scale\"] for newJoint in", "global controllerColor global clavCheckbox global rigGrp, ctrlGrp ogRootchain = cmds.ls(sl", "\"l_\": controllerColor = rgb=(0, 0, 255) elif side == \"r_\":", "cmds.checkBox(label = \"blendColor Mode\", v=0, cc= lambda state: (cmds.checkBox(constraintCheckBox_UI, e=1,", "cmds.pickWalk(ogChain[0], d=\"up\")[0] #ogChain.insert(0, clavJoint) clavController = createClav2(clavJoint + \"_anim\") #", "ws=True, piv=piv) cmds.orientConstraint(clavController, clavJoint) # Parent ik and fk chain", "in [\"X\", \"Y\", \"Z\"]: cmds.addAttr(ikFootControl, ln=bone+coord, k=1, r=1) # Parent", "#create a joint, copy their position and freeze transform cmds.joint(n", "\"------\", ln=\"Attributes\", k=1, r=1) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1, min=0, max=1)", "crvIkCube) pvController = createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=armikHandle[0]) cmds.addAttr(pvController, at=\"enum\", enumName", "rgb=controllerColor) cmds.poleVectorConstraint(loc, targetHandle) def showUI(): global chainMenu_UI global scaleField_UI global", "cmds.ikHandle(sj=ogChain[0] + \"_ik\", ee=ogChain[2] + \"_ik\", sol=\"ikRPsolver\", n=side + HandleName", "\"_fk_anim_grp.visibility\", cd=sdkDriver, v=0, dv=1) # Lock .t and .s attributes", "Rename shape node shapeList = cmds.listRelatives(crvIkCube, s = True) cmds.rename(shapeList,", "finalV = arrowV + midV cmds.xform(loc, ws=1, t=(finalV.x, finalV.y ,finalV.z))", "1, t = 1, r = 1, s = 0)", "8, 9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5], n=side +", "lambda state: (cmds.checkBox(blendCheckbox_UI, e=1, en=state-1))) blendCheckbox_UI = cmds.checkBox(label = \"blendColor", "ln=\"Follow\", k=1, r=1, min=0, max=1) cmds.addAttr(pvController, ln=\"Follow_Leg_Foot\", k=1, r=1, min=0,", "= [(clavCheckbox_UI, \"left\", 10, chainMenu_UI), (constraintCheckBox_UI, \"top\", 5, chainMenu_UI), (blendCheckbox_UI,", "Create a locator used for switching IK/FK mode and snap", "chainMenu_UI), (separator01, \"top\", 5, constraintCheckBox_UI), (scaleField_UI, \"top\", 5, separator01), (scaleControllerText,", "#snap, parent offsetGrp, set color and then make Constraint 
cmds.delete(cmds.pointConstraint(loc,", "r=1) cmds.color(clavController, rgb=controllerColor) # Move pivots on clavicle joint piv", "then make Constraint cmds.delete(cmds.pointConstraint(loc, locGrp)) cmds.parent(loc, locGrp) cmds.makeIdentity(loc, a=1, t=1,", "cmds.addAttr(ikFootControl, ln=\"Lateral_Roll\", k=1, r=1) for bone in [\"Ankle\", \"Ball\", \"Toe_Tap\"]:", "blendCheckbox_UI = cmds.checkBox(label = \"blendColor Mode\", v=0, cc= lambda state:", "== \"y\": cmds.rotate(0,90,0, fk_controller) if orientController == \"z\": cmds.rotate(0,0,90, fk_controller)", "ik controller ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0,", "0 and constraintCheckBox == 0: cmds.error(\"pls, select one relation type\")", "dv=1) # Lock .t and .s attributes #for x in", "1, r = 1, s = 0) cmds.delete(fk_controller, ch =", "-0.5, 0.5), (-0.5, -0.5, -0.5), (-0.5, 0.5, -0.5), (-0.5, 0.5,", "== 0: cmds.error(\"pls, select one relation type\") break newJointName =", "convention sdkDriver = switcherLoc[0] + \".FKIK_Mode\" ikSdkDriven = ogChain[x] +", "== \"l_\": cmds.move(10,0,0, ikHandJoint, r=1, os=1) else: cmds.move(-10,0,0, ikHandJoint, r=1,", "-0.5, -0.5)], k=[0, 1, 2, 3, 4, 5, 6, 7,", "dv=1) ikChainBuild(scaleController, selectChain) fkControllerCreator(scaleController, selectChain) def fkControllerCreator(fkSize, legOrArm): orientController =", "(execButton, \"left\", 5), (execButton, \"right\", 5), ], attachControl = [(clavCheckbox_UI,", "global clavCheckbox_UI if cmds.window(\"switchModeUI\", ex = 1): cmds.deleteUI(\"switchModeUI\") myWin =", "n= side + chainMenu + \"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp)) cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp))", "-0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5, -0.5, -0.5),", "om.MVector(mid[0], mid[1], mid[2]) endV = om.MVector(end[0], end[1], end[2]) startEnd =", "cmds.connectAttr((ogChain[x] + \"_ik.rotate\"), blendColorsNode + \".color1\") cmds.connectAttr((ogChain[x] + \"_fk.rotate\"), blendColorsNode", "switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=0,", "cmds.group(em=1, n= side + chainMenu + \"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp)) cmds.delete(cmds.parentConstraint(ogChain[0],", "ln=\"Lateral_Roll\", k=1, r=1) for bone in [\"Ankle\", \"Ball\", \"Toe_Tap\"]: cmds.addAttr(ikFootControl,", "ctrlGrp) cmds.parent(switcherLocGrp, rigGrp) def clavSel(scaleClav): # Select clavicle Joint moving", "= cmds.checkBox(constraintCheckBox_UI, q=1, v=1) chainMenu = cmds.optionMenu(\"chainMenu_UI\", q=1, v=1) clavCheckbox", "be improved orientControllerMenu = cmds.optionMenu(\"UI_orientControllerMenu\", l=\"What's the secondary axis\") cmds.menuItem(l=\"x\")", "cmds.group(n=crvIkCube + \"_grp\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", crvIkCubeGrp)) cmds.color(crvIkCube, rgb=controllerColor) cmds.scale(armIkScale,", "4, separator01), (plusThree_UI, \"top\", 4, separator01), (separator02, \"top\", 6, scaleField_UI),", "ikFootScale, ikFootScale, ikFootControlGrp) cmds.move(0,-3.2,0, ikFootControl, r=1) cmds.makeIdentity(ikFootControl, a = 1,", "type = \"joint\")[0] ogChain = cmds.listRelatives(ogRootchain, ad = True, type", "\"_fk.visibility\", 0) # Create a locator used for switching IK/FK", "= 1, s = 0) cmds.delete(fk_controller, ch = 1) cmds.color(fk_controller,", "\".translate\" + x, k=0, l=1) #cmds.setAttr(fk_controller + \".scale\" + x,", "range(chainLen): if blendCheckbox == 0 and 
constraintCheckBox == 0: cmds.error(\"pls,", "cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0, dv=1) ikChainBuild(scaleController, selectChain)", "(\"scaleController\", scaleField_UI) legIK(scaleIK, masterIkHandle, HandleName) def armIk(armIkScale, armikHandle, pvName): ikHandJoint", "], attachControl = [(clavCheckbox_UI, \"left\", 10, chainMenu_UI), (constraintCheckBox_UI, \"top\", 5,", "CUBE crvIkCube = cmds.curve(d=1, p=[(-0.5, 0.5, -0.5), (0.5, 0.5, -0.5),", "cmds.window(\"switchModeUI\", ex = 1): cmds.deleteUI(\"switchModeUI\") myWin = cmds.window(\"switchModeUI\", t=\"IKFK Builder\",", "ctrlGrp = cmds.group(em=1, n= side + chainMenu + \"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0],", "SDK visibility sdkDriver = switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ogChain[0]", "else: cmds.move(-10,0,0, ikHandJoint, r=1, os=1) cmds.parent(ikHandJoint, ogChain[2] + \"_ik\") handikHandle", "q=1, v=1) constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1) chainMenu = cmds.optionMenu(\"chainMenu_UI\",", "ogChain[i]) cmds.makeIdentity(newJointName, a = 1, t = 0, r =", "+ \".translate\" + x, k=0, l=1) #cmds.setAttr(fk_controller + \".scale\" +", "e=1) def blendNodeFunc(scaleController, selectChain): # Create some blendColors node with", "+1 and +3 count = 0 def addOneUnit(*args): global count", "= cmds.listRelatives(crvIkCube, s = True) cmds.rename(shapeList, crvIkCube + \"Shape\") crvIkCubeGrp", "and then connect the output to the original joint chain", "for switching IK/FK mode and snap it between two joints", "v=1, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver,", "up and put it at the top of the chain", "cmds.addAttr(pvController, ln=\"Follow_Clav_Hand\", k=1, r=1, min=0, max=1, dv=0.5) # Parent ikController", "midV = om.MVector(mid[0], mid[1], mid[2]) endV = om.MVector(end[0], end[1], end[2])", "fixedScale, clavController) cmds.makeIdentity(clavController, a=1) cmds.move(0,10,0, clavControllerGrp, ws=1, r=1) cmds.color(clavController, rgb=controllerColor)", "4, separator01), (separator02, \"top\", 6, scaleField_UI), (orientControllerMenu, \"top\", 6, separator02),", "0.5, -0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5, -0.5,", "blendColorsNode + \".color2\") cmds.connectAttr((blendColorsNode + \".output\"), (ogChain[x] + \".rotate\" ))", "createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=legikHandle[0]) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1, min=0, max=1)", "+ \".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=1,", "= cmds.curve(d=1, p=[(-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5,", "\"Ball\", \"Toe_Tap\"]: cmds.addAttr(ikFootControl, at=\"enum\", enumName = \"------\", ln=bone, k=1, r=1)", "(-1.108, 0, 0), (-0.784, 0,-2.5), (0, 0,-3), (0.784, 0, -2.5),", "and snap it between two joints switcherLoc = cmds.spaceLocator(n=side +", "0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5),", "+ \"_grp\") #snap, parent offsetGrp, set color and then make", "\"hand_ik_anim\" ) # Rename shape node shapeList = cmds.listRelatives(crvIkCube, s", "\"Arm\": #print (\"scaleController\", scaleField_UI) armIk(scaleIK, masterIkHandle, HandleName) else: #print (\"scaleController\",", "visibility sdkDriver = switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 1) 
cmds.setDrivenKeyframe(ogChain[0] +", "endV = om.MVector(end[0], end[1], end[2]) startEnd = endV - startV", "1) cmds.delete(ikFootControl, ch = 1) cmds.delete(cmds.pointConstraint(ogChain[3] + \"_ik\", ikFootControlGrp)) cmds.color(ikFootControl,", "ogChain[2] + \"_ik\") handikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ikHandJoint, n=side", "# Parent ikController and PV under _rig_GRP cmds.parent(ikFootControlGrp, pvController +", "controller, else not if legOrArm == \"Leg\": if x ==", "2.39), (-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5), (0, 0,-3), (0.784,", "pvName): ballikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ogChain[3] + \"_ik\", sol=\"ikSCsolver\",", "execButton = cmds.button(l=\"Duplicate Chain\", c=partial(duplicateChain, blendNodeFunc, constraintFunc)) cmds.formLayout(mainLayout, e=1, attachForm", "1: blendNodeFunc(scaleController, chainMenu) if constraintCheckBox == 1: constraintFunc(scaleController, chainMenu) if", "else not if legOrArm == \"Leg\": if x == (chainLen-1):", "max=1, dv=0.5) # Create attributes on ikController cmds.addAttr(ikFootControl, at=\"enum\",enumName =", "n=switcherLoc[0] + \"_grp\") cmds.color(switcherLoc, rgb=(255, 255, 0)) #yellow cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp))", "cmds.addAttr(pvController, at=\"enum\", enumName = \"------\", ln=\"Attributes\", k=1, r=1) cmds.addAttr(pvController, ln=\"Follow\",", "sol=\"ikRPsolver\", n=side + HandleName + \"_ikHandle\") cmds.setAttr(masterIkHandle[0] + \".visibility\", 0)", "PV under _rig_GRP cmds.parent(crvIkCubeGrp, pvController + \"_grp\" ,rigGrp) #set SDK", "s = 1) cmds.delete(ikFootControl, ch = 1) cmds.delete(cmds.pointConstraint(ogChain[3] + \"_ik\",", "os=1) else: cmds.move(-10,0,0, ikHandJoint, r=1, os=1) cmds.parent(ikHandJoint, ogChain[2] + \"_ik\")", "t=1) mid = cmds.xform(ogChain[1], q=1, ws=1, t=1) end = cmds.xform(ogChain[2],", "from the channelbox for coord in [\"X\", \"Y\", \"Z\"]: cmds.setAttr(switcherLoc[0]", "dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController", "x == 0: continue cmds.parent(ogChain[x] + \"_fk_anim_grp\", ogChain[x-1] + \"_fk_anim\")", "selectChain) fkControllerCreator(scaleController, selectChain) def constraintFunc(scaleController, selectChain): # Create some blendColors", "FK and IK chains into blendColors channels and then connect", "= cmds.xform(ogChain[2], q=1, ws=1, t=1) startV = om.MVector(start[0], start[1], start[2])", "\".color1\") cmds.connectAttr((ogChain[x] + \"_fk.rotate\"), blendColorsNode + \".color2\") cmds.connectAttr((blendColorsNode + \".output\"),", "en=state-1))) blendCheckbox_UI = cmds.checkBox(label = \"blendColor Mode\", v=0, cc= lambda", "but for now it's ok chainLen = 3 #suffix for", "crvIkCubeGrp) cmds.parent(armikHandle[0], crvIkCube) pvController = createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=armikHandle[0]) cmds.addAttr(pvController,", "crvIkCubeGrp = cmds.group(n=crvIkCube + \"_grp\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", crvIkCubeGrp)) cmds.color(crvIkCube,", "All credits to https://vimeo.com/66015036 start = cmds.xform(ogChain[0], q=1, ws=1, t=1)", "side + chainMenu + \"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp)) cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp)) cmds.parent(ctrlGrp,", "controllers with _fk hierarchy for x in range(chainLen): cmds.parentConstraint(ogChain[x] +", "in orienting FK controllers as 
the user wishes. Maybe this", "(\"scaleController\", scaleField_UI) armIk(scaleIK, masterIkHandle, HandleName) else: #print (\"scaleController\", scaleField_UI) legIK(scaleIK,", "== \"Leg\": asd = False cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd) #", "255) elif side == \"r_\": controllerColor = rgb=(255, 0, 0)", "as om from functools import partial def duplicateChain(*args): global ogChain", "(separator01, \"top\", 5, constraintCheckBox_UI), (scaleField_UI, \"top\", 5, separator01), (scaleControllerText, \"top\",", "copy their position and freeze transform cmds.joint(n = newJointName) cmds.matchTransform(newJointName,", "controller orientation based on second axis if orientController == \"x\":", "0: continue cmds.parent(ogChain[x] + \"_fk_anim_grp\", ogChain[x-1] + \"_fk_anim\") # Set", "joint for x in range(chainLen): # Setup orient constraints cmds.parentConstraint((ogChain[x]", "PV under _rig_GRP cmds.parent(ikFootControlGrp, pvController + \"_grp\" ,rigGrp) # Set", "startEnd = endV - startV startMid = midV - startV", "dv=1) def findPoleVector(loc, targetHandle): # This func is kinda black", "k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Twist\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Lateral_Roll\", k=1, r=1)", "# Buttons +1 and +3 count = 0 def addOneUnit(*args):", "k=1, r=1) # Parent ikController and PV under _rig_GRP cmds.parent(ikFootControlGrp,", "= cmds.joint(n=side + \"hand_ik\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", ikHandJoint)) cmds.makeIdentity(ikHandJoint, a", "5, separator01), (scaleControllerText, \"top\", 8, separator01), (plusOne_UI, \"top\", 4, separator01),", "+ \"_grp\" ,rigGrp) #set SDK visibility sdkDriver = switcherLoc[0] +", "= cmds.window(\"switchModeUI\", t=\"IKFK Builder\", w=300, h=300, s=1) mainLayout = cmds.formLayout(nd=50)", "6, 7, 8, 9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5],", "\"Y\", \"Z\"]: cmds.setAttr(switcherLoc[0] + \".translate\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0]", "k=[0,1,2,3,4,5,6,7,8,9,10], n=side + \"leg_anim_ik\") # Rename shape node shapeList =", "= 1, s = 0) #deselect to make the two", "cmds.makeIdentity(ikFootControl, a = 1, t = 1, r = 1,", "visCheck(vis): if vis == \"Arm\": asd = True if vis", "\"_grp\" ,rigGrp) #set SDK visibility sdkDriver = switcherLoc[0] + \".FKIK_Mode\"", "ikSdkDriven = ogChain[x] + \"_parentConstraint1.\" + ogChain[x] + \"_ikW0\" fkSdkDriven", "cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", ikHandJoint)) cmds.makeIdentity(ikHandJoint, a = 1, t =", "(separator01, \"left\", 1), (separator01, \"right\", 2), #-------------------- (scaleField_UI, \"right\", 65),", "cmds.color(fk_controller, rgb=controllerColor) # Set SDK visibility sdkDriver = switcherLoc[0] +", "toeikHandle[0], legikHandle[0], ikFootControl) #---------- Making Pole Vector -------------# # Pole", "False cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd) # Buttons +1 and +3", "secondary axis\") cmds.menuItem(l=\"x\") cmds.menuItem(l=\"y\") cmds.menuItem(l=\"z\") # Scale the UI becase", "SDK cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1,", "in [\"X\", \"Y\", \"Z\"]: cmds.setAttr(switcherLoc[0] + \".translate\" + coord, k=0,", "end[1], end[2]) startEnd = endV - startV startMid = midV", "ws=True, t=True) cmds.xform(ikFootControl, ws=True, piv=piv) cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl) #----------", "cmds.separator(h=5) separator02 = cmds.separator(h=5) # execButton = 
cmds.button(l=\"Duplicate Chain\", c=partial(duplicateChain,", "v=1) chainMenu = cmds.optionMenu(\"chainMenu_UI\", q=1, v=1) clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1,", "_anim controllers with _fk hierarchy for x in range(chainLen): cmds.parentConstraint(ogChain[x]", "shape node shapeList = cmds.listRelatives(crvIkCube, s = True) cmds.rename(shapeList, crvIkCube", "shape node shapeList = cmds.listRelatives(ikFootControl, s = True) cmds.rename(shapeList, ikFootControl", "mode and snap it between two joints switcherLoc = cmds.spaceLocator(n=side", "cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=1, dv=1) def", "= cmds.group(em=1, n=loc + \"_grp\") #snap, parent offsetGrp, set color", "= cmds.optionMenu(\"chainMenu_UI\", q=1, v=1) clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0) if", "+3 count = 0 def addOneUnit(*args): global count count =", "node shapeList = cmds.listRelatives(crvIkCube, s = True) cmds.rename(shapeList, crvIkCube +", "ee=ogChain[4] + \"_ik\", sol=\"ikSCsolver\", n=side + \"toe_ikHandle\") # Create and", "= cmds.button(l=\"+1\", c=addOneUnit) plusThree_UI = cmds.button(l=\"+3\", c=addThreeUnit) separator01 = cmds.separator(h=5)", "= cmds.button(l=\"Duplicate Chain\", c=partial(duplicateChain, blendNodeFunc, constraintFunc)) cmds.formLayout(mainLayout, e=1, attachForm =", "cmds.delete(cmds.pointConstraint(ogChain[3] + \"_ik\", ikFootControlGrp)) cmds.color(ikFootControl, rgb=controllerColor) # pivot snapping on", "controllerColor = rgb=(255, 0, 0) if chainMenu == \"Leg\": chainLen", "+ chainMenu + \"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp)) cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp)) cmds.parent(ctrlGrp, rigGrp)", "IK chains into blendColors channels and then connect the output", "blendColorsNode + \".blender\") ikChainBuild(scaleController, selectChain) fkControllerCreator(scaleController, selectChain) def constraintFunc(scaleController, selectChain):", "targetHandle) def showUI(): global chainMenu_UI global scaleField_UI global orientControllerMenu global", "top of the chain clavJoint = cmds.pickWalk(ogChain[0], d=\"up\")[0] #ogChain.insert(0, clavJoint)", "s = 0) if side == \"l_\": cmds.move(10,0,0, ikHandJoint, r=1,", "0) # Create a locator used for switching IK/FK mode", "freeze transform, create offset group and color cmds.scale(ikFootScale, ikFootScale, ikFootScale,", "1), (separator02, \"right\", 2), #-------------------- (orientControllerMenu, \"left\", 8), (orientControllerMenu, \"top\",", "v=1, min=1) plusOne_UI = cmds.button(l=\"+1\", c=addOneUnit) plusThree_UI = cmds.button(l=\"+3\", c=addThreeUnit)", "to make the two different hierarchies cmds.select(cl = 1) cmds.parent((ogChain[0]", "side global controllerColor global clavCheckbox global rigGrp, ctrlGrp ogRootchain =", "+ \"_fk_anim\") # Set orientConstraint _anim controllers with _fk hierarchy", "v=1, dv=0) cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver, v=0, dv=1)", "constraintFunc(scaleController, selectChain): # Create some blendColors node with the same", "createSphere import maya.cmds as cmds import maya.OpenMaya as om from", "\"_ik.visibility\", 0) cmds.setAttr(ogChain[0] + \"_fk.visibility\", 0) # Create a locator", "controllers and group offsets # Change rotation, color for y", "1.5], n=side + \"hand_ik_anim\" ) # Rename shape node shapeList", "1, s = 1) cmds.delete(ikFootControl, ch = 1) cmds.delete(cmds.pointConstraint(ogChain[3] +", 
"dv=0.5) # Create attributes on ikController cmds.addAttr(ikFootControl, at=\"enum\",enumName = \"------\",", "x in range(chainLen): # Setup orient constraints cmds.parentConstraint((ogChain[x] + \"_ik\"),", "\"right\", 5), ], attachControl = [(clavCheckbox_UI, \"left\", 10, chainMenu_UI), (constraintCheckBox_UI,", "= cmds.createNode(\"blendColors\", n = ogChain[x] + \"_blend\") # Connect FK", "Import coordinates from ctrlUI_lib cmds.delete(cmds.pointConstraint(clavJoint, clavController)) # Create offset group,", "= cmds.optionMenu(\"UI_orientControllerMenu\", q=1, v=1) # Create controllers and group offsets", "ln=\"Attributes\", k=1, r=1) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1, min=0, max=1) cmds.addAttr(pvController,", "l=\"Which chain?\", cc=visCheck) cmds.menuItem(l=\"Leg\") cmds.menuItem(l=\"Arm\") constraintCheckBox_UI = cmds.checkBox(label = \"orientConsts+SDK", "5, 6, 7, 8, 9, 1.0, 1.1, 1.2, 1.3, 1.4,", "+ \"_fk_anim_grp.visibility\", cd=sdkDriver, v=0, dv=1) # Lock .t and .s", "ws=1, t=1) end = cmds.xform(ogChain[2], q=1, ws=1, t=1) startV =", "1.3, 1.4, 1.5], n=side + \"hand_ik_anim\" ) # Rename shape", "\"_ik\", ee=ikHandJoint, n=side + \"hand_ikHandle\", sol=\"ikSCsolver\") cmds.parent(handikHandle[0], armikHandle[0]) #create IK", "cd=sdkDriver, v=1, dv=1) def findPoleVector(loc, targetHandle): # This func is", "cmds.checkBox(blendCheckbox_UI, q=1, v=1) constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1) chainMenu =", "cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1)", "node shapeList = cmds.listRelatives(ikFootControl, s = True) cmds.rename(shapeList, ikFootControl +", "cmds.listRelatives(ikFootControl, s = True) cmds.rename(shapeList, ikFootControl + \"Shape\") ikFootControlGrp =", "SDK naming convention sdkDriver = switcherLoc[0] + \".FKIK_Mode\" ikSdkDriven =", "chain under clavicle controller cmds.parent((ogChain[0]+\"_fk_anim_grp\"),(ogChain[0] + \"_ik\"), (ogChain[0] + \"_fk\"),", "(-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (0.5,", "= 0, r = 1, s = 0) #deselect to", "coordinates from ctrlUI_lib cmds.delete(cmds.pointConstraint(clavJoint, clavController)) # Create offset group, FDH", "of the chain clavJoint = cmds.pickWalk(ogChain[0], d=\"up\")[0] #ogChain.insert(0, clavJoint) clavController", "cmds.createNode(\"blendColors\", n = ogChain[x] + \"_blend\") # Connect FK and", "\".FKIK_Mode\" cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(pvController", "(0, 0,-3), (0.784, 0, -2.5), (1.108, 0, 0), (0.997, 0,", "in [\"Ankle\", \"Ball\", \"Toe_Tap\"]: cmds.addAttr(ikFootControl, at=\"enum\", enumName = \"------\", ln=bone,", "l=1) cmds.setAttr(switcherLoc[0] + \".rotate\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] +", "the UI becase you'll never know scaleControllerText = cmds.text(l=\"Controllers size\")", "visibility sdkDriver = switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikFootControlGrp +", "vis == \"Arm\": asd = True if vis == \"Leg\":", "cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController +", "t = 1, r = 1, s = 0) if", "= cmds.checkBox(label = \"orientConsts+SDK Mode\", v=0, cc= lambda state: (cmds.checkBox(blendCheckbox_UI,", "blendColorsNode = cmds.createNode(\"blendColors\", n = ogChain[x] + \"_blend\") # 
def blendNodeFunc(scaleController, selectChain):
    # Create one blendColors node per joint, named after the joint
    for x in range(chainLen):
        blendColorsNode = cmds.createNode("blendColors", n = ogChain[x] + "_blend")
        # Connect the FK and IK chains into the blendColors channels and then
        # connect the output to the original joint chain
        cmds.connectAttr((ogChain[x] + "_ik.rotate"), blendColorsNode + ".color1")
        cmds.connectAttr((ogChain[x] + "_fk.rotate"), blendColorsNode + ".color2")
        cmds.connectAttr((blendColorsNode + ".output"), (ogChain[x] + ".rotate"))
        cmds.connectAttr(switcherLoc[0]+".FKIK_Mode", blendColorsNode + ".blender")
    ikChainBuild(scaleController, selectChain)
    fkControllerCreator(scaleController, selectChain)


def constraintFunc(scaleController, selectChain):
    # Drive each original joint with parent constraints from the _ik and _fk chains
    for x in range(chainLen):
        # Setup orient constraints
        cmds.parentConstraint((ogChain[x] + "_ik"), ogChain[x])
        cmds.parentConstraint((ogChain[x] + "_fk"), ogChain[x])

        # Setup SDK naming convention
        sdkDriver = switcherLoc[0] + ".FKIK_Mode"
        ikSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_ikW0"
        fkSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_fkW1"

        # Setup SDK
        cmds.setAttr(sdkDriver, 0)
        cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0)
        cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0)
        cmds.setAttr(sdkDriver, 1)
        cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1)
        cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0, dv=1)
    ikChainBuild(scaleController, selectChain)
    fkControllerCreator(scaleController, selectChain)
def fkControllerCreator(fkSize, legOrArm):
    orientController = cmds.optionMenu("UI_orientControllerMenu", q=1, v=1)

    # Create controllers and offset groups, then fix rotation and color
    for y in range(chainLen):
        anim_group = cmds.group(em=1, n=ogChain[y] + "_fk_anim_grp")
        fk_controller = cmds.circle(n=ogChain[y] + "_fk_anim")[0]  # [0] keeps only the transform, not the history node

        # Set scale
        cmds.scale(fkSize, fkSize, fkSize, fk_controller)
        cmds.matchTransform(anim_group, ogChain[y])
        cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller))
        cmds.parent(fk_controller, anim_group)

        # Set controller orientation based on the secondary axis
        if orientController == "x":
            cmds.rotate(90,0,0, fk_controller)
        if orientController == "y":
            cmds.rotate(0,90,0, fk_controller)
        if orientController == "z":
            cmds.rotate(0,0,90, fk_controller)

        # Freeze transform, delete history and set color
        cmds.makeIdentity(fk_controller, a = 1, t = 1, r = 1, s = 0)
        cmds.delete(fk_controller, ch = 1)
        cmds.color(fk_controller, rgb=controllerColor)

        # Set SDK visibility
        sdkDriver = switcherLoc[0] + ".FKIK_Mode"
        cmds.setAttr(sdkDriver, 1)
        cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=1, dv=0)
        cmds.setAttr(sdkDriver, 0)
        cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=0, dv=1)

        # Lock .t and .s attributes
        #for x in ["X", "Y", "Z"]:
            #cmds.setAttr(fk_controller + ".translate" + x, k=0, l=1)
            #cmds.setAttr(fk_controller + ".scale" + x, k=0, l=1)

    # Create ordered hierarchy
    for x in reversed(range(chainLen)):
        if x == 0:
            continue
        cmds.parent(ogChain[x] + "_fk_anim_grp", ogChain[x-1] + "_fk_anim")

    # Constrain the _fk chain to the _anim controllers
    for x in range(chainLen):
        cmds.parentConstraint(ogChain[x] + "_fk_anim", ogChain[x] + "_fk")
        # If a leg chain is selected, delete the toe controller
        if legOrArm == "Leg":
            if x == (chainLen-1):
                cmds.delete(ogChain[chainLen-1] + "_fk_anim_grp")
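
# NOTE: cmds.circle() builds its circle in the XY plane (normal (0, 0, 1)), so
# the rotate calls in fkControllerCreator() simply tip each controller toward
# the chosen secondary axis before the transform is frozen. An equivalent
# option would be to pass the normal at creation time, e.g.
#   cmds.circle(n=ogChain[y] + "_fk_anim", nr=(1, 0, 0))
# kept here only as a comment so the original behaviour is unchanged.
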
def ikChainBuild(scaleIK, HandleName):
    masterIkHandle = cmds.ikHandle(sj=ogChain[0] + "_ik", ee=ogChain[2] + "_ik", sol="ikRPsolver", n=side + HandleName + "_ikHandle")
    cmds.setAttr(masterIkHandle[0] + ".visibility", 0)
    if HandleName == "Arm":
        #print ("scaleController", scaleField_UI)
        armIk(scaleIK, masterIkHandle, HandleName)
    else:
        #print ("scaleController", scaleField_UI)
        legIK(scaleIK, masterIkHandle, HandleName)


def armIk(armIkScale, armikHandle, pvName):
    ikHandJoint = cmds.joint(n=side + "hand_ik")
    cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", ikHandJoint))
    cmds.makeIdentity(ikHandJoint, a = 1, t = 1, r = 1, s = 0)
    if side == "l_":
        cmds.move(10,0,0, ikHandJoint, r=1, os=1)
    else:
        cmds.move(-10,0,0, ikHandJoint, r=1, os=1)
    cmds.parent(ikHandJoint, ogChain[2] + "_ik")

    handikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ikHandJoint, n=side + "hand_ikHandle", sol="ikSCsolver")
    cmds.parent(handikHandle[0], armikHandle[0])

    # Create IK controller ---> CUBE
    crvIkCube = cmds.curve(d=1, p=[(-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5), (-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5, -0.5, -0.5), (-0.5, -0.5, -0.5)],
                           k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], n=side + "hand_ik_anim")
    # Rename shape node
    shapeList = cmds.listRelatives(crvIkCube, s = True)
    cmds.rename(shapeList, crvIkCube + "Shape")

    crvIkCubeGrp = cmds.group(n=crvIkCube + "_grp")
    cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", crvIkCubeGrp))
    cmds.color(crvIkCube, rgb=controllerColor)
    cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp)
    cmds.parent(armikHandle[0], crvIkCube)

    pvController = createSphere(nome= side+pvName+"_PV")
    findPoleVector(loc=pvController, targetHandle=armikHandle[0])
    cmds.addAttr(pvController, at="enum", enumName = "------", ln="Attributes", k=1, r=1)
    cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
    cmds.addAttr(pvController, ln="Follow_Clav_Hand", k=1, r=1, min=0, max=1, dv=0.5)

    # Parent ikController and PV under _rig_GRP
    cmds.parent(crvIkCubeGrp, pvController + "_grp" ,rigGrp)

    # Set SDK visibility
    sdkDriver = switcherLoc[0] + ".FKIK_Mode"
    cmds.setAttr(sdkDriver, 0)
    cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
    cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
    cmds.setAttr(sdkDriver, 1)
    cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
    cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
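
# NOTE: armIk() creates the throwaway "hand_ik" joint 10 units down +/-X from
# the wrist only so the ikSCsolver handle (wrist -> hand_ik) has an end
# effector to aim at; parenting that handle under the main RP handle lets the
# wrist orientation follow the IK cube. The 10-unit offset is just the value
# hard-coded above, not a solver requirement.
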
def legIK(ikFootScale, legikHandle, pvName):
    ballikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ogChain[3] + "_ik", sol="ikSCsolver", n=side + "ball_ikHandle")
    toeikHandle = cmds.ikHandle(sj=ogChain[3] + "_ik", ee=ogChain[4] + "_ik", sol="ikSCsolver", n=side + "toe_ikHandle")

    # Create and place the ik controller
    ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0, 2.39), (-0.997, 0, 1.789), (-1.108, 0, 0), (-0.784, 0, -2.5), (0, 0, -3), (0.784, 0, -2.5), (1.108, 0, 0), (0.997, 0, 1.789), (0, 0, 2.39)],
                               k=[0,1,2,3,4,5,6,7,8,9,10], n=side + "leg_anim_ik")
    # Rename shape node
    shapeList = cmds.listRelatives(ikFootControl, s = True)
    cmds.rename(shapeList, ikFootControl + "Shape")

    ikFootControlGrp = cmds.group(em=1, n=ikFootControl + "_grp")
    cmds.parent(ikFootControl, ikFootControlGrp)

    # Set size, freeze transform, create offset group and color
    cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp)
    cmds.move(0,-3.2,0, ikFootControl, r=1)
    cmds.makeIdentity(ikFootControl, a = 1, t = 1, r = 1, s = 1)
    cmds.delete(ikFootControl, ch = 1)
    cmds.delete(cmds.pointConstraint(ogChain[3] + "_ik", ikFootControlGrp))
    cmds.color(ikFootControl, rgb=controllerColor)

    # Pivot snapping on the ankle joint
    piv = cmds.xform(ogChain[2], q=True, ws=True, t=True)
    cmds.xform(ikFootControl, ws=True, piv=piv)
    cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl)

    #---------- Making Pole Vector -------------#
    # Pole Vector controller ---> Sphere
    pvController = createSphere(nome= side+pvName+"_PV")
    findPoleVector(loc=pvController, targetHandle=legikHandle[0])
    cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
    cmds.addAttr(pvController, ln="Follow_Leg_Foot", k=1, r=1, min=0, max=1, dv=0.5)

    # Create attributes on the ikController
    cmds.addAttr(ikFootControl, at="enum", enumName = "------", ln="Attributes", k=1, r=1)
    cmds.addAttr(ikFootControl, ln="Twist", k=1, r=1)
    cmds.addAttr(ikFootControl, ln="Lateral_Roll", k=1, r=1)
    for bone in ["Ankle", "Ball", "Toe_Tap"]:
        cmds.addAttr(ikFootControl, at="enum", enumName = "------", ln=bone, k=1, r=1)
        for coord in ["X", "Y", "Z"]:
            cmds.addAttr(ikFootControl, ln=bone+coord, k=1, r=1)

    # Parent ikController and PV under _rig_GRP
    cmds.parent(ikFootControlGrp, pvController + "_grp" ,rigGrp)

    # Set SDK visibility
    sdkDriver = switcherLoc[0] + ".FKIK_Mode"
    cmds.setAttr(sdkDriver, 0)
    cmds.setDrivenKeyframe(ikFootControlGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
    cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
    cmds.setAttr(sdkDriver, 1)
    cmds.setDrivenKeyframe(ikFootControlGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
    cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
def findPoleVector(loc, targetHandle):
    # This func is kinda black magic
    # All credits to https://vimeo.com/66015036
    start = cmds.xform(ogChain[0], q=1, ws=1, t=1)
    mid = cmds.xform(ogChain[1], q=1, ws=1, t=1)
    end = cmds.xform(ogChain[2], q=1, ws=1, t=1)
    startV = om.MVector(start[0], start[1], start[2])
    midV = om.MVector(mid[0], mid[1], mid[2])
    endV = om.MVector(end[0], end[1], end[2])
    startEnd = endV - startV
    startMid = midV - startV
    dotP = startMid * startEnd
    proj = float(dotP) / float(startEnd.length())
    startEndN = startEnd.normal()
    projV = startEndN * proj
    arrowV = startMid - projV
    arrowV *= 10  # distance from the joint
    finalV = arrowV + midV
    cmds.xform(loc, ws=1, t=(finalV.x, finalV.y, finalV.z))

    locGrp = cmds.group(em=1, n=loc + "_grp")
    # Snap, parent the offset group, set color and then make the constraint
    cmds.delete(cmds.pointConstraint(loc, locGrp))
    cmds.parent(loc, locGrp)
    cmds.makeIdentity(loc, a=1, t=1, r=1, s=1)
    cmds.color(loc, rgb=controllerColor)
    cmds.poleVectorConstraint(loc, targetHandle)
"world = True) cmds.setAttr(ogChain[0] + \"_ik.visibility\", 0) cmds.setAttr(ogChain[0] + \"_fk.visibility\",", "# execButton = cmds.button(l=\"Duplicate Chain\", c=partial(duplicateChain, blendNodeFunc, constraintFunc)) cmds.formLayout(mainLayout, e=1,", "with _fk hierarchy for x in range(chainLen): cmds.parentConstraint(ogChain[x] + \"_fk_anim\",", "color for y in range(chainLen): anim_group = cmds.group(em=1, n=ogChain[y] +", "= cmds.optionMenu(\"UI_orientControllerMenu\", l=\"What's the secondary axis\") cmds.menuItem(l=\"x\") cmds.menuItem(l=\"y\") cmds.menuItem(l=\"z\") #", "= \"------\", ln=\"Attributes\", k=1, r=1) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1, min=0,", "= ogChain[x] + \"_blend\") # Connect FK and IK chains", "40), (plusOne_UI, \"right\", 0, 45), (plusThree_UI, \"right\", 0, 49) ]", "def duplicateChain(*args): global ogChain global chainLen global switcherLoc global side", "= cmds.group(em=1, n=ogChain[y] + \"_fk_anim_grp\") fk_controller = cmds.circle(n=ogChain[y] + \"_fk_anim\")[0]", "def ikChainBuild(scaleIK, HandleName): masterIkHandle = cmds.ikHandle(sj=ogChain[0] + \"_ik\", ee=ogChain[2] +", "+ \"_grp\") cmds.color(switcherLoc, rgb=(255, 255, 0)) #yellow cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp)) cmds.parent(switcherLoc,", "joint, copy their position and freeze transform cmds.joint(n = newJointName)", "chain?\", cc=visCheck) cmds.menuItem(l=\"Leg\") cmds.menuItem(l=\"Arm\") constraintCheckBox_UI = cmds.checkBox(label = \"orientConsts+SDK Mode\",", "\"_anim\") # Import coordinates from ctrlUI_lib cmds.delete(cmds.pointConstraint(clavJoint, clavController)) # Create", "1, s = 0) if side == \"l_\": cmds.move(10,0,0, ikHandJoint,", "if side == \"l_\": controllerColor = rgb=(0, 0, 255) elif", "\"_fk_anim\") # Set orientConstraint _anim controllers with _fk hierarchy for", "groups rigGrp = cmds.group(em=1, n= side + chainMenu + \"_rig_grp\")", "from ctrlUI_lib import createClav2, createSphere import maya.cmds as cmds import", "r=1, s=1) cmds.color(loc, rgb=controllerColor) cmds.poleVectorConstraint(loc, targetHandle) def showUI(): global chainMenu_UI", "hierarchy groups rigGrp = cmds.group(em=1, n= side + chainMenu +", "\"joint\") ogChain.append(ogRootchain) ogChain.reverse() side = ogRootchain[0:2] # Initialize input from", "#-------------------- (scaleField_UI, \"right\", 65), (scaleField_UI, \"left\", 5), (plusOne_UI, \"right\", 5),", "the secondary axis\") cmds.menuItem(l=\"x\") cmds.menuItem(l=\"y\") cmds.menuItem(l=\"z\") # Scale the UI", "same name of the joint for x in range(chainLen): #", "== \"Arm\": asd = True if vis == \"Leg\": asd", "v=1+count, e=1) def blendNodeFunc(scaleController, selectChain): # Create some blendColors node", "offsetGrp, set color and then make Constraint cmds.delete(cmds.pointConstraint(loc, locGrp)) cmds.parent(loc,", "clavControllerGrp) fixedScale = scaleClav/4 cmds.scale(fixedScale, fixedScale, fixedScale, clavController) cmds.makeIdentity(clavController, a=1)", "Set controller orientation based on second axis if orientController ==", "Set SDK visibility sdkDriver = switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0)", "= 1, r = 1, s = 1) cmds.delete(ikFootControl, ch", "HandleName == \"Arm\": #print (\"scaleController\", scaleField_UI) armIk(scaleIK, masterIkHandle, HandleName) else:", "masterIkHandle, HandleName) else: #print (\"scaleController\", scaleField_UI) legIK(scaleIK, masterIkHandle, HandleName) def", "to the original joint chain cmds.connectAttr((ogChain[x] + \"_ik.rotate\"), 
blendColorsNode +", "cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ikHandJoint, n=side + \"hand_ikHandle\", sol=\"ikSCsolver\") cmds.parent(handikHandle[0], armikHandle[0])", "n=loc + \"_grp\") #snap, parent offsetGrp, set color and then", "for x in reversed(range(chainLen)): if x == 0: continue cmds.parent(ogChain[x]", "chainMenu + \"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp)) cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp)) cmds.parent(ctrlGrp, rigGrp) #", "+ \".visibility\", cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=0,", "ex = 1): cmds.deleteUI(\"switchModeUI\") myWin = cmds.window(\"switchModeUI\", t=\"IKFK Builder\", w=300,", "80), (clavCheckbox_UI, \"top\", 7), (blendCheckbox_UI, \"left\", 5), (separator01, \"left\", 1),", "+ \"_grp.visibility\", cd=sdkDriver, v=1, dv=1) def legIK(ikFootScale, legikHandle, pvName): ballikHandle", "becase you'll never know scaleControllerText = cmds.text(l=\"Controllers size\") scaleField_UI =", "vis=asd, v=asd) # Buttons +1 and +3 count = 0", "k=1, r=1) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1, min=0, max=1) cmds.addAttr(pvController, ln=\"Follow_Clav_Hand\",", "r=1) cmds.makeIdentity(ikFootControl, a = 1, t = 1, r =", "dv=1) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0, dv=1) ikChainBuild(scaleController, selectChain) fkControllerCreator(scaleController, selectChain) def", "True) cmds.rename(shapeList, crvIkCube + \"Shape\") crvIkCubeGrp = cmds.group(n=crvIkCube + \"_grp\")", "cmds.select(cl = 1) cmds.parent((ogChain[0] + \"_ik\"), world = True) cmds.setAttr(ogChain[0]", "1: constraintFunc(scaleController, chainMenu) if clavCheckbox == 1: clavSel(scaleController) else: cmds.parent(ogChain[0]", "0), (scaleField_UI, \"left\", 110, 0), #(scaleField_UI, \"right\",0, 40), (plusOne_UI, \"right\",", "+ \"_fk.rotate\"), blendColorsNode + \".color2\") cmds.connectAttr((blendColorsNode + \".output\"), (ogChain[x] +", "legOrArm): orientController = cmds.optionMenu(\"UI_orientControllerMenu\", q=1, v=1) # Create controllers and", "\"top\", 8, separator01), (plusOne_UI, \"top\", 4, separator01), (plusThree_UI, \"top\", 4,", "ln=\"Twist\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Lateral_Roll\", k=1, r=1) for bone in", "sdkDriver = switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\",", "rgb=controllerColor) # Move pivots on clavicle joint piv = cmds.xform(clavJoint,", "THIS SHIT cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1) #remove .t, .r, .s and", "# Change rotation, color for y in range(chainLen): anim_group =", "cmds.button(l=\"+1\", c=addOneUnit) plusThree_UI = cmds.button(l=\"+3\", c=addThreeUnit) separator01 = cmds.separator(h=5) separator02", "1.1, 1.2, 1.3, 1.4, 1.5], n=side + \"hand_ik_anim\" ) #", "cmds.makeIdentity(fk_controller, a = 1, t = 1, r = 1,", "piv = cmds.xform(ogChain[2], q=True, ws=True, t=True) cmds.xform(ikFootControl, ws=True, piv=piv) cmds.parent(ballikHandle[0],", "\".visibility\", cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=0, dv=0)", "showUI(): global chainMenu_UI global scaleField_UI global orientControllerMenu global constraintCheckBox_UI global", "+ \".translate\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".rotate\" +", "for y in range(chainLen): anim_group = cmds.group(em=1, n=ogChain[y] + \"_fk_anim_grp\")", "switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0) 
cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=0,", "-------------# # Pole Vector controller ---> Sphere pvController = createSphere(nome=", "legOrArm == \"Leg\": if x == (chainLen-1): cmds.delete(ogChain[chainLen-1] + \"_fk_anim_grp\")", "+ \".FKIK_Mode\" ikSdkDriven = ogChain[x] + \"_parentConstraint1.\" + ogChain[x] +", "input from UI scaleController = cmds.intField(scaleField_UI, q=1, v=1) blendCheckbox =", "cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 1)", "cmds.matchTransform(anim_group, ogChain[y]) cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller)) cmds.parent(fk_controller, anim_group) # Set controller orientation", "\"_grp\") #snap, parent offsetGrp, set color and then make Constraint", "cmds.checkBox(label = \"orientConsts+SDK Mode\", v=0, cc= lambda state: (cmds.checkBox(blendCheckbox_UI, e=1,", "q=1, v=1) blendCheckbox = cmds.checkBox(blendCheckbox_UI, q=1, v=1) constraintCheckBox = cmds.checkBox(constraintCheckBox_UI,", "switcherLoc = cmds.spaceLocator(n=side + chainMenu + \"_ikfk_Switch\") switcherLocGrp = cmds.group(em=1,", "as the user wishes. Maybe this can be improved orientControllerMenu", "= True if vis == \"Leg\": asd = False cmds.checkBox(clavCheckbox_UI,", "1.789), (0, 0, 2.39)], k=[0,1,2,3,4,5,6,7,8,9,10], n=side + \"leg_anim_ik\") # Rename", "cmds.setAttr(switcherLoc[0] + \".scale\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".visibility\",", "= cmds.xform(clavJoint, q=True, ws=True, t=True) cmds.xform(clavController, ws=True, piv=piv) cmds.xform(clavControllerGrp, ws=True,", "\".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=1, dv=1)", "cmds.button(l=\"Duplicate Chain\", c=partial(duplicateChain, blendNodeFunc, constraintFunc)) cmds.formLayout(mainLayout, e=1, attachForm = [", "0.5), (-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5), (-0.5, 0.5, -0.5),", "7), (blendCheckbox_UI, \"left\", 5), (separator01, \"left\", 1), (separator01, \"right\", 2),", "# Scale the UI becase you'll never know scaleControllerText =", "ee=ikHandJoint, n=side + \"hand_ikHandle\", sol=\"ikSCsolver\") cmds.parent(handikHandle[0], armikHandle[0]) #create IK controller", "cmds.circle(n=ogChain[y] + \"_fk_anim\")[0] # If not [0] it'll warn some", "dv=1) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=1, dv=1) def legIK(ikFootScale, legikHandle,", "lambda state: (cmds.checkBox(constraintCheckBox_UI, e=1, en=state-1))) clavCheckbox_UI = cmds.checkBox(l=\"Clavicle\", vis=0) #", "projV arrowV*= 10 #distance from joint finalV = arrowV +", "\"top\", 5, chainMenu_UI), (blendCheckbox_UI, \"top\", 5, chainMenu_UI), (separator01, \"top\", 5,", "findPoleVector(loc=pvController, targetHandle=legikHandle[0]) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1, min=0, max=1) cmds.addAttr(pvController, ln=\"Follow_Leg_Foot\",", "t = 1, r = 1, s = 0) cmds.delete(fk_controller,", "global chainMenu_UI global scaleField_UI global orientControllerMenu global constraintCheckBox_UI global blendCheckbox_UI", "v=1) constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1) chainMenu = cmds.optionMenu(\"chainMenu_UI\", q=1,", "vis == \"Leg\": asd = False cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd)", "(plusThree_UI, \"top\", 4, separator01), (separator02, \"top\", 6, scaleField_UI), (orientControllerMenu, \"top\",", "v=0) if side == \"l_\": controllerColor = rgb=(0, 0, 255)", 
"cmds.rotate(0,0,90, fk_controller) # Freeze transform, delete history and set color", "targetHandle): # This func is kinda black magic # All", "dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0,", "ogChain = cmds.listRelatives(ogRootchain, ad = True, type = \"joint\") ogChain.append(ogRootchain)", "x == (chainLen-1): cmds.delete(ogChain[chainLen-1] + \"_fk_anim_grp\") def ikChainBuild(scaleIK, HandleName): masterIkHandle", "#-------------------- (execButton, \"bottom\", 5), (execButton, \"left\", 5), (execButton, \"right\", 5),", "\"joint\")[0] ogChain = cmds.listRelatives(ogRootchain, ad = True, type = \"joint\")", "if orientController == \"z\": cmds.rotate(0,0,90, fk_controller) # Freeze transform, delete", "selectChain) fkControllerCreator(scaleController, selectChain) def fkControllerCreator(fkSize, legOrArm): orientController = cmds.optionMenu(\"UI_orientControllerMenu\", q=1,", "0) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\",", "mainLayout = cmds.formLayout(nd=50) # Useful in selecting which chain: Leg", "ch = 1) cmds.color(fk_controller, rgb=controllerColor) # Set SDK visibility sdkDriver", "cmds.addAttr(switcherLoc, ln=\"FKIK_Mode\", at=\"short\", min=0, max=1, k=1, r=1) cmds.move(0,0,-12, switcherLocGrp, r=1)", "\"Shape\") ikFootControlGrp = cmds.group(em=1, n=ikFootControl + \"_grp\") cmds.parent(ikFootControl, ikFootControlGrp) #", "# Useful in selecting which chain: Leg or Arm? chainMenu_UI", "chainMenu) if constraintCheckBox == 1: constraintFunc(scaleController, chainMenu) if clavCheckbox ==", "# Create controllers and group offsets # Change rotation, color", "at=\"enum\", enumName = \"------\", ln=bone, k=1, r=1) for coord in", "# Create attributes on ikController cmds.addAttr(ikFootControl, at=\"enum\",enumName = \"------\", ln=\"Attributes\",", "\"top\", 7), (blendCheckbox_UI, \"left\", 5), (separator01, \"left\", 1), (separator01, \"right\",", "cmds.group(em=1, n=loc + \"_grp\") #snap, parent offsetGrp, set color and", "fk_controller) cmds.matchTransform(anim_group, ogChain[y]) cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller)) cmds.parent(fk_controller, anim_group) # Set controller", "0) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\",", "cmds.optionMenu(\"chainMenu_UI\", l=\"Which chain?\", cc=visCheck) cmds.menuItem(l=\"Leg\") cmds.menuItem(l=\"Arm\") constraintCheckBox_UI = cmds.checkBox(label =", "#yellow cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp)) cmds.parent(switcherLoc, switcherLocGrp) cmds.delete(cmds.pointConstraint(ogChain[1], ogChain[2], switcherLocGrp)) cmds.addAttr(switcherLoc, ln=\"FKIK_Mode\",", "Pole Vector controller ---> Sphere pvController = createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController,", "fkSdkDriven = ogChain[x] + \"_parentConstraint1.\" + ogChain[x] + \"_fkW1\" #", "0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5, -0.5,", "Builder\", w=300, h=300, s=1) mainLayout = cmds.formLayout(nd=50) # Useful in", "\"left\", 5), (plusOne_UI, \"right\", 5), (plusThree_UI, \"right\", 5), (scaleControllerText, \"left\",", "legikHandle, pvName): ballikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ogChain[3] + \"_ik\",", "], attachPosition = [#(clavCheckbox_UI, \"right\", 0, 10), (constraintCheckBox_UI, \"left\", 0,", 
"+ \".visibility\", k=0, l=1) # Create hierarchy groups rigGrp =", "cmds.joint(n = newJointName) cmds.matchTransform(newJointName, ogChain[i]) cmds.makeIdentity(newJointName, a = 1, t", "color cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp) cmds.move(0,-3.2,0, ikFootControl, r=1) cmds.makeIdentity(ikFootControl, a", "(0.5, 0.5, -0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5,", "k=0, l=1) cmds.setAttr(switcherLoc[0] + \".scale\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0]", "\"top\", 6, scaleField_UI), (orientControllerMenu, \"top\", 6, separator02), ], attachPosition =", "cmds.move(0,-3.2,0, ikFootControl, r=1) cmds.makeIdentity(ikFootControl, a = 1, t = 1,", "(scaleField_UI, \"top\", 5, separator01), (scaleControllerText, \"top\", 8, separator01), (plusOne_UI, \"top\",", "channelbox for coord in [\"X\", \"Y\", \"Z\"]: cmds.setAttr(switcherLoc[0] + \".translate\"", "\"_fk_anim\")[0] # If not [0] it'll warn some stuff related", "c=partial(duplicateChain, blendNodeFunc, constraintFunc)) cmds.formLayout(mainLayout, e=1, attachForm = [ (chainMenu_UI, \"left\",", "startEndN = startEnd.normal() projV = startEndN * proj arrowV =", "5, chainMenu_UI), (separator01, \"top\", 5, constraintCheckBox_UI), (scaleField_UI, \"top\", 5, separator01),", "\"_fk_anim_grp\") def ikChainBuild(scaleIK, HandleName): masterIkHandle = cmds.ikHandle(sj=ogChain[0] + \"_ik\", ee=ogChain[2]", "cmds.parent(ctrlGrp, rigGrp) # Execute if blendCheckbox == 1: blendNodeFunc(scaleController, chainMenu)", "global count count = count + 3 cmds.intField(scaleField_UI, v=1+count, e=1)", "cmds.error(\"pls, select one relation type\") break newJointName = ogChain[i] +", "cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver,", "continue cmds.parent(ogChain[x] + \"_fk_anim_grp\", ogChain[x-1] + \"_fk_anim\") # Set orientConstraint", "1): cmds.deleteUI(\"switchModeUI\") myWin = cmds.window(\"switchModeUI\", t=\"IKFK Builder\", w=300, h=300, s=1)", "joint for x in range(chainLen): blendColorsNode = cmds.createNode(\"blendColors\", n =", "startV dotP = startMid * startEnd proj = float(dotP) /", "cmds.xform(loc, ws=1, t=(finalV.x, finalV.y ,finalV.z)) locGrp = cmds.group(em=1, n=loc +", "attachControl = [(clavCheckbox_UI, \"left\", 10, chainMenu_UI), (constraintCheckBox_UI, \"top\", 5, chainMenu_UI),", "cmds.rename(shapeList, ikFootControl + \"Shape\") ikFootControlGrp = cmds.group(em=1, n=ikFootControl + \"_grp\")", "the two different hierarchies cmds.select(cl = 1) cmds.parent((ogChain[0] + \"_ik\"),", "\"z\": cmds.rotate(0,0,90, fk_controller) # Freeze transform, delete history and set", "0) cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver, v=0, dv=1) # Lock .t", "cmds.scale(fixedScale, fixedScale, fixedScale, clavController) cmds.makeIdentity(clavController, a=1) cmds.move(0,10,0, clavControllerGrp, ws=1, r=1)", "legIK(ikFootScale, legikHandle, pvName): ballikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ogChain[3] +", "cmds.delete(fk_controller, ch = 1) cmds.color(fk_controller, rgb=controllerColor) # Set SDK visibility", "blendCheckbox = cmds.checkBox(blendCheckbox_UI, q=1, v=1) constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1)", "True if vis == \"Leg\": asd = False cmds.checkBox(clavCheckbox_UI, e=1,", "r=1, min=0, max=1) cmds.addAttr(pvController, ln=\"Follow_Leg_Foot\", k=1, r=1, min=0, max=1, dv=0.5)", "+ coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + 
\".scale\" + coord, k=0,", "+ \".scale\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".visibility\", k=0,", "(blendCheckbox_UI, \"left\", 5), (separator01, \"left\", 1), (separator01, \"right\", 2), #--------------------", "+ \".rotate\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".scale\" +", "fkControllerCreator(fkSize, legOrArm): orientController = cmds.optionMenu(\"UI_orientControllerMenu\", q=1, v=1) # Create controllers", "1, r = 1, s = 0) if side ==", "if orientController == \"y\": cmds.rotate(0,90,0, fk_controller) if orientController == \"z\":", "+ \"hand_ik\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", ikHandJoint)) cmds.makeIdentity(ikHandJoint, a = 1,", "pvController + \"_grp\" ,rigGrp) #set SDK visibility sdkDriver = switcherLoc[0]", "\"Z\"]: cmds.setAttr(switcherLoc[0] + \".translate\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] +", "and .s attributes #for x in [\"X\", \"Y\", \"Z\"]: #cmds.setAttr(fk_controller", "= cmds.group(em=1, n= side + chainMenu + \"_rig_grp\") ctrlGrp =", "cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(pvController +", "+ \"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp)) cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp)) cmds.parent(ctrlGrp, rigGrp) # Execute", "0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5,", "10), (constraintCheckBox_UI, \"left\", 0, 26), (blendCheckbox_UI, \"right\", 10, 24), (scaleControllerText,", "3 cmds.intField(scaleField_UI, v=1+count, e=1) def blendNodeFunc(scaleController, selectChain): # Create some", "Maybe this can be improved orientControllerMenu = cmds.optionMenu(\"UI_orientControllerMenu\", l=\"What's the", "startMid * startEnd proj = float(dotP) / float(startEnd.length()) startEndN =", "True, type = \"joint\")[0] ogChain = cmds.listRelatives(ogRootchain, ad = True,", "joint piv = cmds.xform(ogChain[2], q=True, ws=True, t=True) cmds.xform(ikFootControl, ws=True, piv=piv)", "asd = False cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd) # Buttons +1", "-0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5),", "# Set size, freeze transform, create offset group and color", "= 3 #suffix for the new chains newJointList = [\"_ik\",", "Parent ik and fk chain under clavicle controller cmds.parent((ogChain[0]+\"_fk_anim_grp\"),(ogChain[0] +", "q=1, v=0) if side == \"l_\": controllerColor = rgb=(0, 0,", "= 1): cmds.deleteUI(\"switchModeUI\") myWin = cmds.window(\"switchModeUI\", t=\"IKFK Builder\", w=300, h=300,", "+ ogChain[x] + \"_fkW1\" # Setup SDK cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ikSdkDriven,", "side = ogRootchain[0:2] # Initialize input from UI scaleController =", "+ \"Shape\") ikFootControlGrp = cmds.group(em=1, n=ikFootControl + \"_grp\") cmds.parent(ikFootControl, ikFootControlGrp)", "+ \"_ikfk_Switch\") switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] + \"_grp\") cmds.color(switcherLoc, rgb=(255,", "# Create offset group, FDH and move up clavControllerGrp =", "switching IK/FK mode and snap it between two joints switcherLoc", "chainMenu_UI), (blendCheckbox_UI, \"top\", 5, chainMenu_UI), (separator01, \"top\", 5, constraintCheckBox_UI), (scaleField_UI,", "0, 0), (0.997, 0, 1.789), (0, 0, 2.39)], k=[0,1,2,3,4,5,6,7,8,9,10], n=side", "n = ogChain[x] + \"_blend\") # Connect FK and IK", "(scaleField_UI, \"left\", 110, 0), #(scaleField_UI, \"right\",0, 40), (plusOne_UI, \"right\", 0,", "q=1, ws=1, t=1) mid = cmds.xform(ogChain[1], q=1, ws=1, t=1) end", 
"_rig_GRP cmds.parent(ikFootControlGrp, pvController + \"_grp\" ,rigGrp) # Set SDK visibility", "chainLen = 5 else: #this is totally unscalable but for", "and PV under _rig_GRP cmds.parent(crvIkCubeGrp, pvController + \"_grp\" ,rigGrp) #set", "of the joint for x in range(chainLen): # Setup orient", "ad = True, type = \"joint\") ogChain.append(ogRootchain) ogChain.reverse() side =", "ikFootControlGrp) # Set size, freeze transform, create offset group and", "rotation, color for y in range(chainLen): anim_group = cmds.group(em=1, n=ogChain[y]", "If leg chain is selected delete toe controller, else not", "+ \"_ikW0\" fkSdkDriven = ogChain[x] + \"_parentConstraint1.\" + ogChain[x] +", "plusOne_UI = cmds.button(l=\"+1\", c=addOneUnit) plusThree_UI = cmds.button(l=\"+3\", c=addThreeUnit) separator01 =", "+ x, k=0, l=1) #cmds.setAttr(fk_controller + \".scale\" + x, k=0,", "import createClav2, createSphere import maya.cmds as cmds import maya.OpenMaya as", "break newJointName = ogChain[i] + newJoint #create a joint, copy", "+ \"_ik\", ikHandJoint)) cmds.makeIdentity(ikHandJoint, a = 1, t = 1,", "v=1, dv=1) def legIK(ikFootScale, legikHandle, pvName): ballikHandle = cmds.ikHandle(sj=ogChain[2] +", "coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".scale\" + coord, k=0, l=1)", "#cmds.setAttr(fk_controller + \".scale\" + x, k=0, l=1) # Create ordered", "chain: Leg or Arm? chainMenu_UI = cmds.optionMenu(\"chainMenu_UI\", l=\"Which chain?\", cc=visCheck)", "pvController = createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=legikHandle[0]) cmds.addAttr(pvController, ln=\"Follow\", k=1, r=1,", "in newJointList: for i in range(chainLen): if blendCheckbox == 0", "0, 45), (plusThree_UI, \"right\", 0, 49) ] ) cmds.showWindow(myWin) showUI()", "coord in [\"X\", \"Y\", \"Z\"]: cmds.setAttr(switcherLoc[0] + \".translate\" + coord,", "rgb=controllerColor) # Set SDK visibility sdkDriver = switcherLoc[0] + \".FKIK_Mode\"", "ikFootControlGrp)) cmds.color(ikFootControl, rgb=controllerColor) # pivot snapping on ankle joint piv", "0), #(scaleField_UI, \"right\",0, 40), (plusOne_UI, \"right\", 0, 45), (plusThree_UI, \"right\",", "x in [\"X\", \"Y\", \"Z\"]: #cmds.setAttr(fk_controller + \".translate\" + x,", "scaleControllerText = cmds.text(l=\"Controllers size\") scaleField_UI = cmds.intField(en=10, v=1, min=1) plusOne_UI", "h=300, s=1) mainLayout = cmds.formLayout(nd=50) # Useful in selecting which", "9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5], n=side + \"hand_ik_anim\"", "em=1) cmds.delete(cmds.parentConstraint(clavJoint, clavControllerGrp)) cmds.parent(clavController, clavControllerGrp) fixedScale = scaleClav/4 cmds.scale(fixedScale, fixedScale,", "1) cmds.color(fk_controller, rgb=controllerColor) # Set SDK visibility sdkDriver = switcherLoc[0]", "elif side == \"r_\": controllerColor = rgb=(255, 0, 0) if", "= True) cmds.rename(shapeList, ikFootControl + \"Shape\") ikFootControlGrp = cmds.group(em=1, n=ikFootControl", "ikFootControlGrp) cmds.move(0,-3.2,0, ikFootControl, r=1) cmds.makeIdentity(ikFootControl, a = 1, t =", "= cmds.circle(n=ogChain[y] + \"_fk_anim\")[0] # If not [0] it'll warn", "\"top\", 5), (chainMenu_UI, \"right\", 80), (clavCheckbox_UI, \"top\", 7), (blendCheckbox_UI, \"left\",", "delete toe controller, else not if legOrArm == \"Leg\": if", "cmds.menuItem(l=\"z\") # Scale the UI becase you'll never know scaleControllerText", "k=1, r=1, min=0, max=1, dv=0.5) # Parent ikController and PV", "+ \"_ik\", sol=\"ikSCsolver\", n=side + \"toe_ikHandle\") # Create and place", "global 
plusOne_UI global plusThree_UI global clavCheckbox_UI if cmds.window(\"switchModeUI\", ex =", "constraintCheckBox == 0: cmds.error(\"pls, select one relation type\") break newJointName", "as cmds import maya.OpenMaya as om from functools import partial", "= newJointName) cmds.matchTransform(newJointName, ogChain[i]) cmds.makeIdentity(newJointName, a = 1, t =", "cc= lambda state: (cmds.checkBox(blendCheckbox_UI, e=1, en=state-1))) blendCheckbox_UI = cmds.checkBox(label =", "blendCheckbox == 0 and constraintCheckBox == 0: cmds.error(\"pls, select one", "cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0, dv=1) ikChainBuild(scaleController, selectChain) fkControllerCreator(scaleController,", "cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver,", "n=side + \"leg_anim_ik\") # Rename shape node shapeList = cmds.listRelatives(ikFootControl,", "== \"z\": cmds.rotate(0,0,90, fk_controller) # Freeze transform, delete history and", "# Setup orient constraints cmds.parentConstraint((ogChain[x] + \"_ik\"), ogChain[x]) cmds.parentConstraint((ogChain[x] +", "\"top\", 5, chainMenu_UI), (separator01, \"top\", 5, constraintCheckBox_UI), (scaleField_UI, \"top\", 5,", "the new chains newJointList = [\"_ik\", \"_fk\", \"_scale\"] for newJoint", "with the same name of the joint for x in", "cmds.rotate(90,0,0, fk_controller) if orientController == \"y\": cmds.rotate(0,90,0, fk_controller) if orientController", "which chain: Leg or Arm? chainMenu_UI = cmds.optionMenu(\"chainMenu_UI\", l=\"Which chain?\",", "t=1, r=1, s=1) cmds.color(loc, rgb=controllerColor) cmds.poleVectorConstraint(loc, targetHandle) def showUI(): global", "k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,", "= True) cmds.setAttr(ogChain[0] + \"_ik.visibility\", 0) cmds.setAttr(ogChain[0] + \"_fk.visibility\", 0)", "(0.997, 0, 1.789), (0, 0, 2.39)], k=[0,1,2,3,4,5,6,7,8,9,10], n=side + \"leg_anim_ik\")", "r=1) cmds.addAttr(ikFootControl, ln=\"Twist\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Lateral_Roll\", k=1, r=1) for", "bone in [\"Ankle\", \"Ball\", \"Toe_Tap\"]: cmds.addAttr(ikFootControl, at=\"enum\", enumName = \"------\",", "8), (orientControllerMenu, \"top\", 5), #-------------------- (execButton, \"bottom\", 5), (execButton, \"left\",", "[\"Ankle\", \"Ball\", \"Toe_Tap\"]: cmds.addAttr(ikFootControl, at=\"enum\", enumName = \"------\", ln=bone, k=1,", "\"toe_ikHandle\") # Create and place ik controller ikFootControl = cmds.curve(d=2,", "cmds.color(crvIkCube, rgb=controllerColor) cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp) cmds.parent(armikHandle[0], crvIkCube) pvController =", "moving up and put it at the top of the", "ogRootchain[0:2] # Initialize input from UI scaleController = cmds.intField(scaleField_UI, q=1,", "i in range(chainLen): if blendCheckbox == 0 and constraintCheckBox ==", "\"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp)) cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp)) cmds.parent(ctrlGrp, rigGrp) # Execute if", "stuff related to Maya underworld # Set scale cmds.scale(fkSize, fkSize,", "\"Leg\": chainLen = 5 else: #this is totally unscalable but", "you'll never know scaleControllerText = cmds.text(l=\"Controllers size\") scaleField_UI = cmds.intField(en=10,", "# If leg chain is selected delete toe controller, else", "\"top\", 6, separator02), ], attachPosition = [#(clavCheckbox_UI, \"right\", 0, 10),", "orient constraints cmds.parentConstraint((ogChain[x] + 
\"_ik\"), ogChain[x]) cmds.parentConstraint((ogChain[x] + \"_fk\"), ogChain[x])", "if blendCheckbox == 1: blendNodeFunc(scaleController, chainMenu) if constraintCheckBox == 1:", "node with the same name of the joint for x", "+ \"_ik\", ikFootControlGrp)) cmds.color(ikFootControl, rgb=controllerColor) # pivot snapping on ankle", "name of the joint for x in range(chainLen): # Setup", "scaleField_UI), (orientControllerMenu, \"top\", 6, separator02), ], attachPosition = [#(clavCheckbox_UI, \"right\",", "if constraintCheckBox == 1: constraintFunc(scaleController, chainMenu) if clavCheckbox == 1:", "cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver, v=0, dv=1) # Lock .t and", "cmds.menuItem(l=\"Leg\") cmds.menuItem(l=\"Arm\") constraintCheckBox_UI = cmds.checkBox(label = \"orientConsts+SDK Mode\", v=0, cc=", "\"_ik\") handikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ikHandJoint, n=side + \"hand_ikHandle\",", "cmds.menuItem(l=\"x\") cmds.menuItem(l=\"y\") cmds.menuItem(l=\"z\") # Scale the UI becase you'll never", "it'll warn some stuff related to Maya underworld # Set", "magic # All credits to https://vimeo.com/66015036 start = cmds.xform(ogChain[0], q=1,", "+ \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=0, dv=0)", "\"_fk.rotate\"), blendColorsNode + \".color2\") cmds.connectAttr((blendColorsNode + \".output\"), (ogChain[x] + \".rotate\"", "cmds.color(clavController, rgb=controllerColor) # Move pivots on clavicle joint piv =", "\"_grp\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", crvIkCubeGrp)) cmds.color(crvIkCube, rgb=controllerColor) cmds.scale(armIkScale, armIkScale, armIkScale,", "under clavicle controller cmds.parent((ogChain[0]+\"_fk_anim_grp\"),(ogChain[0] + \"_ik\"), (ogChain[0] + \"_fk\"), clavController)", "def blendNodeFunc(scaleController, selectChain): # Create some blendColors node with the", "start[1], start[2]) midV = om.MVector(mid[0], mid[1], mid[2]) endV = om.MVector(end[0],", "startV startMid = midV - startV dotP = startMid *", "black magic # All credits to https://vimeo.com/66015036 start = cmds.xform(ogChain[0],", "chainMenu_UI), (constraintCheckBox_UI, \"top\", 5, chainMenu_UI), (blendCheckbox_UI, \"top\", 5, chainMenu_UI), (separator01,", "and freeze transform cmds.joint(n = newJointName) cmds.matchTransform(newJointName, ogChain[i]) cmds.makeIdentity(newJointName, a", "# Execute if blendCheckbox == 1: blendNodeFunc(scaleController, chainMenu) if constraintCheckBox", "\".rotate\" )) cmds.connectAttr(switcherLoc[0]+\".FKIK_Mode\", blendColorsNode + \".blender\") ikChainBuild(scaleController, selectChain) fkControllerCreator(scaleController, selectChain)", "# Create some blendColors node with the same name of", "-0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5,", "switcherLocGrp, r=1) #IMPROVE THIS SHIT cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1) #remove .t,", "\".visibility\", 0) if HandleName == \"Arm\": #print (\"scaleController\", scaleField_UI) armIk(scaleIK,", "---> Sphere pvController = createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=legikHandle[0]) cmds.addAttr(pvController, ln=\"Follow\",", "s=1) cmds.color(loc, rgb=controllerColor) cmds.poleVectorConstraint(loc, targetHandle) def showUI(): global chainMenu_UI global", "vis=0) # Useful in orienting FK controllers as the user", "min=0, max=1, k=1, r=1) cmds.move(0,0,-12, switcherLocGrp, r=1) #IMPROVE THIS SHIT", "newJointName) 
cmds.matchTransform(newJointName, ogChain[i]) cmds.makeIdentity(newJointName, a = 1, t = 0,", "orienting FK controllers as the user wishes. Maybe this can", "n=side + \"hand_ik_anim\" ) # Rename shape node shapeList =", "# Create a locator used for switching IK/FK mode and", "om.MVector(end[0], end[1], end[2]) startEnd = endV - startV startMid =", "handikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ikHandJoint, n=side + \"hand_ikHandle\", sol=\"ikSCsolver\")", "def showUI(): global chainMenu_UI global scaleField_UI global orientControllerMenu global constraintCheckBox_UI", "\"_parentConstraint1.\" + ogChain[x] + \"_ikW0\" fkSdkDriven = ogChain[x] + \"_parentConstraint1.\"", "(scaleControllerText, \"top\", 8, separator01), (plusOne_UI, \"top\", 4, separator01), (plusThree_UI, \"top\",", "newJointList = [\"_ik\", \"_fk\", \"_scale\"] for newJoint in newJointList: for", "newJoint #create a joint, copy their position and freeze transform", "0) cmds.setAttr(ogChain[0] + \"_fk.visibility\", 0) # Create a locator used", "plusOne_UI global plusThree_UI global clavCheckbox_UI if cmds.window(\"switchModeUI\", ex = 1):", "(separator02, \"right\", 2), #-------------------- (orientControllerMenu, \"left\", 8), (orientControllerMenu, \"top\", 5),", "c=addThreeUnit) separator01 = cmds.separator(h=5) separator02 = cmds.separator(h=5) # execButton =", "range(chainLen): # Setup orient constraints cmds.parentConstraint((ogChain[x] + \"_ik\"), ogChain[x]) cmds.parentConstraint((ogChain[x]", "== (chainLen-1): cmds.delete(ogChain[chainLen-1] + \"_fk_anim_grp\") def ikChainBuild(scaleIK, HandleName): masterIkHandle =", "Pole Vector -------------# # Pole Vector controller ---> Sphere pvController", "(cmds.checkBox(constraintCheckBox_UI, e=1, en=state-1))) clavCheckbox_UI = cmds.checkBox(l=\"Clavicle\", vis=0) # Useful in", "Set SDK visibility sdkDriver = switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 1)", "FDH and move up clavControllerGrp = cmds.group(n=clavController + \"_grp\", em=1)", "mid = cmds.xform(ogChain[1], q=1, ws=1, t=1) end = cmds.xform(ogChain[2], q=1,", "cmds.parentConstraint((ogChain[x] + \"_fk\"), ogChain[x]) # Setup SDK naming convention sdkDriver", "startV = om.MVector(start[0], start[1], start[2]) midV = om.MVector(mid[0], mid[1], mid[2])", "0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5),", "ws=1, t=1) mid = cmds.xform(ogChain[1], q=1, ws=1, t=1) end =", "one relation type\") break newJointName = ogChain[i] + newJoint #create", "(ogChain[0] + \"_fk\"), clavController) cmds.parent(clavControllerGrp, ctrlGrp) def visCheck(vis): if vis", "= 1) cmds.delete(ikFootControl, ch = 1) cmds.delete(cmds.pointConstraint(ogChain[3] + \"_ik\", ikFootControlGrp))", "blendNodeFunc, constraintFunc)) cmds.formLayout(mainLayout, e=1, attachForm = [ (chainMenu_UI, \"left\", 8),", "[0] it'll warn some stuff related to Maya underworld #", "cd=sdkDriver, v=0, dv=1) # Lock .t and .s attributes #for", "dv=0.5) # Parent ikController and PV under _rig_GRP cmds.parent(crvIkCubeGrp, pvController", "= 1) cmds.color(fk_controller, rgb=controllerColor) # Set SDK visibility sdkDriver =", "= switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver,", "ikFootControl) #---------- Making Pole Vector -------------# # Pole Vector controller", "r=1, os=1) cmds.parent(ikHandJoint, ogChain[2] + \"_ik\") handikHandle = cmds.ikHandle(sj=ogChain[2] +", "cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=0, dv=0) 
cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(crvIkCubeGrp +", "for i in range(chainLen): if blendCheckbox == 0 and constraintCheckBox", "+ \".FKIK_Mode\" cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver, v=1, dv=0)", "anim_group) # Set controller orientation based on second axis if", "proj arrowV = startMid - projV arrowV*= 10 #distance from", "fk_controller) # Freeze transform, delete history and set color cmds.makeIdentity(fk_controller,", "5, constraintCheckBox_UI), (scaleField_UI, \"top\", 5, separator01), (scaleControllerText, \"top\", 8, separator01),", "(-0.5, -0.5, -0.5), (-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (-0.5,", "#IMPROVE THIS SHIT cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1) #remove .t, .r, .s", "blendColorsNode + \".color1\") cmds.connectAttr((ogChain[x] + \"_fk.rotate\"), blendColorsNode + \".color2\") cmds.connectAttr((blendColorsNode", "(0, 0, 2.39), (-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5), (0,", "cmds.scale(fkSize, fkSize, fkSize, fk_controller) cmds.matchTransform(anim_group, ogChain[y]) cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller)) cmds.parent(fk_controller, anim_group)", "0 def addOneUnit(*args): global count count = count + 1", "attributes on ikController cmds.addAttr(ikFootControl, at=\"enum\",enumName = \"------\", ln=\"Attributes\", k=1, r=1)", "10, chainMenu_UI), (constraintCheckBox_UI, \"top\", 5, chainMenu_UI), (blendCheckbox_UI, \"top\", 5, chainMenu_UI),", "#set SDK visibility sdkDriver = switcherLoc[0] + \".FKIK_Mode\" cmds.setAttr(sdkDriver, 0)", "+ \"_ik.visibility\", 0) cmds.setAttr(ogChain[0] + \"_fk.visibility\", 0) # Create a", "Parent ikController and PV under _rig_GRP cmds.parent(crvIkCubeGrp, pvController + \"_grp\"", "0.5, -0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5,", "armIk(armIkScale, armikHandle, pvName): ikHandJoint = cmds.joint(n=side + \"hand_ik\") cmds.delete(cmds.parentConstraint(ogChain[2] +", "cmds.parent(clavControllerGrp, ctrlGrp) def visCheck(vis): if vis == \"Arm\": asd =", "constraintCheckBox_UI), (scaleField_UI, \"top\", 5, separator01), (scaleControllerText, \"top\", 8, separator01), (plusOne_UI,", "user wishes. 
Maybe this can be improved orientControllerMenu = cmds.optionMenu(\"UI_orientControllerMenu\",", "piv=piv) cmds.orientConstraint(clavController, clavJoint) # Parent ik and fk chain under", "\"------\", ln=\"Attributes\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Twist\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Lateral_Roll\",", "cmds.parent(armikHandle[0], crvIkCube) pvController = createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=armikHandle[0]) cmds.addAttr(pvController, at=\"enum\",", "3 #suffix for the new chains newJointList = [\"_ik\", \"_fk\",", "shapeList = cmds.listRelatives(crvIkCube, s = True) cmds.rename(shapeList, crvIkCube + \"Shape\")", "\"_ik.rotate\"), blendColorsNode + \".color1\") cmds.connectAttr((ogChain[x] + \"_fk.rotate\"), blendColorsNode + \".color2\")", "Parent ikController and PV under _rig_GRP cmds.parent(ikFootControlGrp, pvController + \"_grp\"", "cmds.group(em=1, n=ogChain[y] + \"_fk_anim_grp\") fk_controller = cmds.circle(n=ogChain[y] + \"_fk_anim\")[0] #", "min=0, max=1, dv=0.5) # Create attributes on ikController cmds.addAttr(ikFootControl, at=\"enum\",enumName", "# Import coordinates from ctrlUI_lib cmds.delete(cmds.pointConstraint(clavJoint, clavController)) # Create offset", "clavController)) # Create offset group, FDH and move up clavControllerGrp", "(separator01, \"right\", 2), #-------------------- (scaleField_UI, \"right\", 65), (scaleField_UI, \"left\", 5),", "cmds.setAttr(ogChain[0] + \"_fk.visibility\", 0) # Create a locator used for", "cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController +", "q=1, ws=1, t=1) end = cmds.xform(ogChain[2], q=1, ws=1, t=1) startV", "now it's ok chainLen = 3 #suffix for the new", "ctrlGrp) def visCheck(vis): if vis == \"Arm\": asd = True", "armikHandle, pvName): ikHandJoint = cmds.joint(n=side + \"hand_ik\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\",", "to https://vimeo.com/66015036 start = cmds.xform(ogChain[0], q=1, ws=1, t=1) mid =", "- projV arrowV*= 10 #distance from joint finalV = arrowV", "s=1) mainLayout = cmds.formLayout(nd=50) # Useful in selecting which chain:", "0) if chainMenu == \"Leg\": chainLen = 5 else: #this", "l=1) cmds.setAttr(switcherLoc[0] + \".scale\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] +", "r=1, os=1) else: cmds.move(-10,0,0, ikHandJoint, r=1, os=1) cmds.parent(ikHandJoint, ogChain[2] +", "Vector -------------# # Pole Vector controller ---> Sphere pvController =", "\"_fk\"), clavController) cmds.parent(clavControllerGrp, ctrlGrp) def visCheck(vis): if vis == \"Arm\":", "cd=sdkDriver, v=0, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=1,", "controllerColor = rgb=(0, 0, 255) elif side == \"r_\": controllerColor", "if cmds.window(\"switchModeUI\", ex = 1): cmds.deleteUI(\"switchModeUI\") myWin = cmds.window(\"switchModeUI\", t=\"IKFK", "cd=sdkDriver, v=0, dv=0) cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikSdkDriven,", "= cmds.group(em=1, n= side + chainMenu + \"_ctrl_grp\") cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp))", "\".FKIK_Mode\" ikSdkDriven = ogChain[x] + \"_parentConstraint1.\" + ogChain[x] + \"_ikW0\"", "t = 0, r = 1, s = 0) #deselect", ")) cmds.connectAttr(switcherLoc[0]+\".FKIK_Mode\", blendColorsNode + \".blender\") ikChainBuild(scaleController, selectChain) fkControllerCreator(scaleController, 
selectChain) def", "\"blendColor Mode\", v=0, cc= lambda state: (cmds.checkBox(constraintCheckBox_UI, e=1, en=state-1))) clavCheckbox_UI", "en=state-1))) clavCheckbox_UI = cmds.checkBox(l=\"Clavicle\", vis=0) # Useful in orienting FK", "cmds.listRelatives(ogRootchain, ad = True, type = \"joint\") ogChain.append(ogRootchain) ogChain.reverse() side", "enumName = \"------\", ln=bone, k=1, r=1) for coord in [\"X\",", "rgb=(255, 0, 0) if chainMenu == \"Leg\": chainLen = 5", "+ \"_fk_anim_grp\", ctrlGrp) cmds.parent(switcherLocGrp, rigGrp) def clavSel(scaleClav): # Select clavicle", "# This func is kinda black magic # All credits", "a=1) cmds.move(0,10,0, clavControllerGrp, ws=1, r=1) cmds.color(clavController, rgb=controllerColor) # Move pivots", "cmds.addAttr(ikFootControl, ln=\"Twist\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Lateral_Roll\", k=1, r=1) for bone", "\"right\", 5), (scaleControllerText, \"left\", 5), (separator02, \"left\", 1), (separator02, \"right\",", "l=1) # Create ordered hierarchy for x in reversed(range(chainLen)): if", "rigGrp)) cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp)) cmds.parent(ctrlGrp, rigGrp) # Execute if blendCheckbox ==", "0, 2.39)], k=[0,1,2,3,4,5,6,7,8,9,10], n=side + \"leg_anim_ik\") # Rename shape node", "t=(finalV.x, finalV.y ,finalV.z)) locGrp = cmds.group(em=1, n=loc + \"_grp\") #snap,", "cmds.intField(en=10, v=1, min=1) plusOne_UI = cmds.button(l=\"+1\", c=addOneUnit) plusThree_UI = cmds.button(l=\"+3\",", "def clavSel(scaleClav): # Select clavicle Joint moving up and put", "= arrowV + midV cmds.xform(loc, ws=1, t=(finalV.x, finalV.y ,finalV.z)) locGrp", "orientation based on second axis if orientController == \"x\": cmds.rotate(90,0,0,", "os=1) cmds.parent(ikHandJoint, ogChain[2] + \"_ik\") handikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\",", "cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl) #---------- Making Pole Vector -------------# #", "ln=\"Attributes\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Twist\", k=1, r=1) cmds.addAttr(ikFootControl, ln=\"Lateral_Roll\", k=1,", "separator01), (scaleControllerText, \"top\", 8, separator01), (plusOne_UI, \"top\", 4, separator01), (plusThree_UI,", "ogChain[x] + \"_fk\") # If leg chain is selected delete", "q=1, v=1) # Create controllers and group offsets # Change", "\"_fk_anim_grp.visibility\", cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver, 0) cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver,", "separator01 = cmds.separator(h=5) separator02 = cmds.separator(h=5) # execButton = cmds.button(l=\"Duplicate", "cmds.ikHandle(sj=ogChain[3] + \"_ik\", ee=ogChain[4] + \"_ik\", sol=\"ikSCsolver\", n=side + \"toe_ikHandle\")", "q=1, v=1) chainMenu = cmds.optionMenu(\"chainMenu_UI\", q=1, v=1) clavCheckbox = cmds.checkBox(clavCheckbox_UI,", "= cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ikHandJoint, n=side + \"hand_ikHandle\", sol=\"ikSCsolver\") cmds.parent(handikHandle[0],", "and .v from the channelbox for coord in [\"X\", \"Y\",", "(plusOne_UI, \"top\", 4, separator01), (plusThree_UI, \"top\", 4, separator01), (separator02, \"top\",", "\"_fk\"), ogChain[x]) # Setup SDK naming convention sdkDriver = switcherLoc[0]", "r=1) # Parent ikController and PV under _rig_GRP cmds.parent(ikFootControlGrp, pvController", "in [\"X\", \"Y\", \"Z\"]: #cmds.setAttr(fk_controller + \".translate\" + x, k=0,", "[#(clavCheckbox_UI, \"right\", 0, 10), (constraintCheckBox_UI, \"left\", 0, 26), (blendCheckbox_UI, \"right\",", "= cmds.ikHandle(sj=ogChain[0] + \"_ik\", 
ee=ogChain[2] + \"_ik\", sol=\"ikRPsolver\", n=side +", "# Move pivots on clavicle joint piv = cmds.xform(clavJoint, q=True,", "\".FKIK_Mode\" cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ogChain[0] + \"_fk_anim_grp.visibility\", cd=sdkDriver, v=1, dv=0) cmds.setAttr(sdkDriver,", "k=1, r=1) for coord in [\"X\", \"Y\", \"Z\"]: cmds.addAttr(ikFootControl, ln=bone+coord,", "cmds.xform(ikFootControl, ws=True, piv=piv) cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0], ikFootControl) #---------- Making Pole", "\"right\", 0, 10), (constraintCheckBox_UI, \"left\", 0, 26), (blendCheckbox_UI, \"right\", 10,", "to Maya underworld # Set scale cmds.scale(fkSize, fkSize, fkSize, fk_controller)", "= om.MVector(start[0], start[1], start[2]) midV = om.MVector(mid[0], mid[1], mid[2]) endV", "orientController == \"y\": cmds.rotate(0,90,0, fk_controller) if orientController == \"z\": cmds.rotate(0,0,90,", "offset group, FDH and move up clavControllerGrp = cmds.group(n=clavController +", "ogChain[0] + \"_fk\", ctrlGrp) cmds.parent(ogChain[0] + \"_fk_anim_grp\", ctrlGrp) cmds.parent(switcherLocGrp, rigGrp)", "and group offsets # Change rotation, color for y in", "IK/FK mode and snap it between two joints switcherLoc =", "cmds.scale(ikFootScale, ikFootScale, ikFootScale, ikFootControlGrp) cmds.move(0,-3.2,0, ikFootControl, r=1) cmds.makeIdentity(ikFootControl, a =", "\"left\", 5), (separator01, \"left\", 1), (separator01, \"right\", 2), #-------------------- (scaleField_UI,", "type = \"joint\") ogChain.append(ogRootchain) ogChain.reverse() side = ogRootchain[0:2] # Initialize", "== 1: blendNodeFunc(scaleController, chainMenu) if constraintCheckBox == 1: constraintFunc(scaleController, chainMenu)", "and place ik controller ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789),", "dv=0) cmds.setAttr(sdkDriver, 1) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController", "# All credits to https://vimeo.com/66015036 start = cmds.xform(ogChain[0], q=1, ws=1,", "= 5 else: #this is totally unscalable but for now", "constraintCheckBox_UI = cmds.checkBox(label = \"orientConsts+SDK Mode\", v=0, cc= lambda state:", ".r, .s and .v from the channelbox for coord in", "2), #-------------------- (scaleField_UI, \"right\", 65), (scaleField_UI, \"left\", 5), (plusOne_UI, \"right\",", "in range(chainLen): if blendCheckbox == 0 and constraintCheckBox == 0:", "+ 3 cmds.intField(scaleField_UI, v=1+count, e=1) def blendNodeFunc(scaleController, selectChain): # Create", ",finalV.z)) locGrp = cmds.group(em=1, n=loc + \"_grp\") #snap, parent offsetGrp,", "-0.5), (-0.5, -0.5, -0.5)], k=[0, 1, 2, 3, 4, 5,", "fk_controller)) cmds.parent(fk_controller, anim_group) # Set controller orientation based on second", "based on second axis if orientController == \"x\": cmds.rotate(90,0,0, fk_controller)", "+ \"_fk_anim\", ogChain[x] + \"_fk\") # If leg chain is", "the original joint chain cmds.connectAttr((ogChain[x] + \"_ik.rotate\"), blendColorsNode + \".color1\")", "== 0: continue cmds.parent(ogChain[x] + \"_fk_anim_grp\", ogChain[x-1] + \"_fk_anim\") #", "constraintCheckBox_UI global blendCheckbox_UI global plusOne_UI global plusThree_UI global clavCheckbox_UI if", "+ \".rotate\" )) cmds.connectAttr(switcherLoc[0]+\".FKIK_Mode\", blendColorsNode + \".blender\") ikChainBuild(scaleController, selectChain) fkControllerCreator(scaleController,", "ikController cmds.addAttr(ikFootControl, at=\"enum\",enumName = \"------\", ln=\"Attributes\", k=1, r=1) 
cmds.addAttr(ikFootControl, ln=\"Twist\",", "ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0, 2.39), (-0.997,0,1.789),", "10 #distance from joint finalV = arrowV + midV cmds.xform(loc,", "= cmds.group(n=crvIkCube + \"_grp\") cmds.delete(cmds.parentConstraint(ogChain[2] + \"_ik\", crvIkCubeGrp)) cmds.color(crvIkCube, rgb=controllerColor)", "\".color2\") cmds.connectAttr((blendColorsNode + \".output\"), (ogChain[x] + \".rotate\" )) cmds.connectAttr(switcherLoc[0]+\".FKIK_Mode\", blendColorsNode", "arrowV = startMid - projV arrowV*= 10 #distance from joint", "# Create hierarchy groups rigGrp = cmds.group(em=1, n= side +", "separator02 = cmds.separator(h=5) # execButton = cmds.button(l=\"Duplicate Chain\", c=partial(duplicateChain, blendNodeFunc,", "0.5), (0.5, 0.5, -0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5),", "FK controllers as the user wishes. Maybe this can be", "ordered hierarchy for x in reversed(range(chainLen)): if x == 0:", "-0.5)], k=[0, 1, 2, 3, 4, 5, 6, 7, 8,", "HandleName): masterIkHandle = cmds.ikHandle(sj=ogChain[0] + \"_ik\", ee=ogChain[2] + \"_ik\", sol=\"ikRPsolver\",", "Connect FK and IK chains into blendColors channels and then", "set color cmds.makeIdentity(fk_controller, a = 1, t = 1, r", "+ \"_blend\") # Connect FK and IK chains into blendColors", "clavicle controller cmds.parent((ogChain[0]+\"_fk_anim_grp\"),(ogChain[0] + \"_ik\"), (ogChain[0] + \"_fk\"), clavController) cmds.parent(clavControllerGrp,", "\"left\", 110, 0), #(scaleField_UI, \"right\",0, 40), (plusOne_UI, \"right\", 0, 45),", "\"_ikW0\" fkSdkDriven = ogChain[x] + \"_parentConstraint1.\" + ogChain[x] + \"_fkW1\"", "Vector controller ---> Sphere pvController = createSphere(nome= side+pvName+\"_PV\") findPoleVector(loc=pvController, targetHandle=legikHandle[0])", "s = 0) #deselect to make the two different hierarchies", "+ coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".rotate\" + coord, k=0,", "under _rig_GRP cmds.parent(crvIkCubeGrp, pvController + \"_grp\" ,rigGrp) #set SDK visibility", "some stuff related to Maya underworld # Set scale cmds.scale(fkSize,", "# If not [0] it'll warn some stuff related to", "separator01), (plusThree_UI, \"top\", 4, separator01), (separator02, \"top\", 6, scaleField_UI), (orientControllerMenu,", "global chainLen global switcherLoc global side global controllerColor global clavCheckbox", "Scale the UI becase you'll never know scaleControllerText = cmds.text(l=\"Controllers", "Initialize input from UI scaleController = cmds.intField(scaleField_UI, q=1, v=1) blendCheckbox", "q=True, ws=True, t=True) cmds.xform(clavController, ws=True, piv=piv) cmds.xform(clavControllerGrp, ws=True, piv=piv) cmds.orientConstraint(clavController,", "v=1) clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0) if side == \"l_\":", "endV - startV startMid = midV - startV dotP =", "cmds.xform(ogChain[2], q=True, ws=True, t=True) cmds.xform(ikFootControl, ws=True, piv=piv) cmds.parent(ballikHandle[0], toeikHandle[0], legikHandle[0],", "def legIK(ikFootScale, legikHandle, pvName): ballikHandle = cmds.ikHandle(sj=ogChain[2] + \"_ik\", ee=ogChain[3]", "cmds.parent(ogChain[0] + \"_fk_anim_grp\", ctrlGrp) cmds.parent(switcherLocGrp, rigGrp) def clavSel(scaleClav): # Select", "#ogChain.insert(0, clavJoint) clavController = createClav2(clavJoint + \"_anim\") # Import coordinates", "startEndN * proj arrowV = startMid - projV arrowV*= 10", "1.4, 1.5], n=side + \"hand_ik_anim\" ) # Rename shape node", "= cmds.xform(ogChain[2], q=True, ws=True, t=True) cmds.xform(ikFootControl, ws=True, piv=piv) 
cmds.parent(ballikHandle[0], toeikHandle[0],", "a = 1, t = 0, r = 1, s", "set color and then make Constraint cmds.delete(cmds.pointConstraint(loc, locGrp)) cmds.parent(loc, locGrp)", "e=1, vis=asd, v=asd) # Buttons +1 and +3 count =", "is selected delete toe controller, else not if legOrArm ==", "clavController) cmds.parent(clavControllerGrp, ctrlGrp) def visCheck(vis): if vis == \"Arm\": asd", "(chainLen-1): cmds.delete(ogChain[chainLen-1] + \"_fk_anim_grp\") def ikChainBuild(scaleIK, HandleName): masterIkHandle = cmds.ikHandle(sj=ogChain[0]", "used for switching IK/FK mode and snap it between two", "(cmds.checkBox(blendCheckbox_UI, e=1, en=state-1))) blendCheckbox_UI = cmds.checkBox(label = \"blendColor Mode\", v=0,", "+ chainMenu + \"_ikfk_Switch\") switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] + \"_grp\")", "cmds.separator(h=5) # execButton = cmds.button(l=\"Duplicate Chain\", c=partial(duplicateChain, blendNodeFunc, constraintFunc)) cmds.formLayout(mainLayout,", "+ \"_fk_anim\")[0] # If not [0] it'll warn some stuff", "Setup SDK naming convention sdkDriver = switcherLoc[0] + \".FKIK_Mode\" ikSdkDriven", "= 1) cmds.delete(cmds.pointConstraint(ogChain[3] + \"_ik\", ikFootControlGrp)) cmds.color(ikFootControl, rgb=controllerColor) # pivot", "0, r = 1, s = 0) #deselect to make", "partial def duplicateChain(*args): global ogChain global chainLen global switcherLoc global", "Mode\", v=0, cc= lambda state: (cmds.checkBox(constraintCheckBox_UI, e=1, en=state-1))) clavCheckbox_UI =", "om from functools import partial def duplicateChain(*args): global ogChain global", "v=1, dv=1) def findPoleVector(loc, targetHandle): # This func is kinda", "def addOneUnit(*args): global count count = count + 1 cmds.intField(scaleField_UI,", "= count + 3 cmds.intField(scaleField_UI, v=1+count, e=1) def blendNodeFunc(scaleController, selectChain):", "= cmds.group(em=1, n=switcherLoc[0] + \"_grp\") cmds.color(switcherLoc, rgb=(255, 255, 0)) #yellow", "move up clavControllerGrp = cmds.group(n=clavController + \"_grp\", em=1) cmds.delete(cmds.parentConstraint(clavJoint, clavControllerGrp))", "1) cmds.setDrivenKeyframe(ikFootControlGrp + \".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\",", "26), (blendCheckbox_UI, \"right\", 10, 24), (scaleControllerText, \"left\", 5, 0), (scaleField_UI,", "SHIT cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1) #remove .t, .r, .s and .v", "cmds.setAttr(switcherLoc[0] + \".translate\" + coord, k=0, l=1) cmds.setAttr(switcherLoc[0] + \".rotate\"", "= cmds.separator(h=5) # execButton = cmds.button(l=\"Duplicate Chain\", c=partial(duplicateChain, blendNodeFunc, constraintFunc))", "= cmds.formLayout(nd=50) # Useful in selecting which chain: Leg or", "#-------------------- (orientControllerMenu, \"left\", 8), (orientControllerMenu, \"top\", 5), #-------------------- (execButton, \"bottom\",", "legIK(scaleIK, masterIkHandle, HandleName) def armIk(armIkScale, armikHandle, pvName): ikHandJoint = cmds.joint(n=side", "ikFootControl, r=1) cmds.makeIdentity(ikFootControl, a = 1, t = 1, r", "ee=ogChain[2] + \"_ik\", sol=\"ikRPsolver\", n=side + HandleName + \"_ikHandle\") cmds.setAttr(masterIkHandle[0]", "for coord in [\"X\", \"Y\", \"Z\"]: cmds.setAttr(switcherLoc[0] + \".translate\" +", "Move pivots on clavicle joint piv = cmds.xform(clavJoint, q=True, ws=True,", "scaleField_UI = cmds.intField(en=10, v=1, min=1) plusOne_UI = cmds.button(l=\"+1\", c=addOneUnit) plusThree_UI", "cd=sdkDriver, v=0, dv=0) 
cmds.setDrivenKeyframe(pvController + \"_grp.visibility\", cd=sdkDriver, v=0, dv=0) cmds.setAttr(sdkDriver,", "cmds.move(10,0,0, ikHandJoint, r=1, os=1) else: cmds.move(-10,0,0, ikHandJoint, r=1, os=1) cmds.parent(ikHandJoint,", "x in range(chainLen): cmds.parentConstraint(ogChain[x] + \"_fk_anim\", ogChain[x] + \"_fk\") #", "\"right\", 2), #-------------------- (orientControllerMenu, \"left\", 8), (orientControllerMenu, \"top\", 5), #--------------------", "type\") break newJointName = ogChain[i] + newJoint #create a joint,", "switcherLocGrp, mo=1) #remove .t, .r, .s and .v from the", "1) cmds.setDrivenKeyframe(crvIkCubeGrp + \".visibility\", cd=sdkDriver, v=1, dv=1) cmds.setDrivenKeyframe(pvController + \"_grp.visibility\",", "# Set scale cmds.scale(fkSize, fkSize, fkSize, fk_controller) cmds.matchTransform(anim_group, ogChain[y]) cmds.delete(cmds.parentConstraint(ogChain[y],", "cmds.xform(ogChain[1], q=1, ws=1, t=1) end = cmds.xform(ogChain[2], q=1, ws=1, t=1)", "== \"r_\": controllerColor = rgb=(255, 0, 0) if chainMenu ==", "\"right\",0, 40), (plusOne_UI, \"right\", 0, 45), (plusThree_UI, \"right\", 0, 49)", "\"right\", 10, 24), (scaleControllerText, \"left\", 5, 0), (scaleField_UI, \"left\", 110,", "and move up clavControllerGrp = cmds.group(n=clavController + \"_grp\", em=1) cmds.delete(cmds.parentConstraint(clavJoint,", "crvIkCubeGrp)) cmds.color(crvIkCube, rgb=controllerColor) cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp) cmds.parent(armikHandle[0], crvIkCube) pvController", "if blendCheckbox == 0 and constraintCheckBox == 0: cmds.error(\"pls, select", "= 0) cmds.delete(fk_controller, ch = 1) cmds.color(fk_controller, rgb=controllerColor) # Set", "cmds.delete(ogChain[chainLen-1] + \"_fk_anim_grp\") def ikChainBuild(scaleIK, HandleName): masterIkHandle = cmds.ikHandle(sj=ogChain[0] +", "cmds.ls(sl = True, type = \"joint\")[0] ogChain = cmds.listRelatives(ogRootchain, ad", "cmds.connectAttr((ogChain[x] + \"_fk.rotate\"), blendColorsNode + \".color2\") cmds.connectAttr((blendColorsNode + \".output\"), (ogChain[x]", "t=1) startV = om.MVector(start[0], start[1], start[2]) midV = om.MVector(mid[0], mid[1],", "clavicle joint piv = cmds.xform(clavJoint, q=True, ws=True, t=True) cmds.xform(clavController, ws=True,", "axis\") cmds.menuItem(l=\"x\") cmds.menuItem(l=\"y\") cmds.menuItem(l=\"z\") # Scale the UI becase you'll", "rgb=(0, 0, 255) elif side == \"r_\": controllerColor = rgb=(255,", "(-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5,", "t=True) cmds.xform(clavController, ws=True, piv=piv) cmds.xform(clavControllerGrp, ws=True, piv=piv) cmds.orientConstraint(clavController, clavJoint) #", "mid[1], mid[2]) endV = om.MVector(end[0], end[1], end[2]) startEnd = endV", ".s and .v from the channelbox for coord in [\"X\",", "scaleField_UI) legIK(scaleIK, masterIkHandle, HandleName) def armIk(armIkScale, armikHandle, pvName): ikHandJoint =", "\"x\": cmds.rotate(90,0,0, fk_controller) if orientController == \"y\": cmds.rotate(0,90,0, fk_controller) if", "switcherLoc global side global controllerColor global clavCheckbox global rigGrp, ctrlGrp", "separator01), (plusOne_UI, \"top\", 4, separator01), (plusThree_UI, \"top\", 4, separator01), (separator02,", "(0.5, 0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5,", "on second axis if orientController == \"x\": cmds.rotate(90,0,0, fk_controller) if", "= cmds.separator(h=5) separator02 = cmds.separator(h=5) # execButton = cmds.button(l=\"Duplicate Chain\",", "#remove .t, .r, .s and .v from the channelbox for", "if x == 
[Fragments of the ssimbox/ssimbox-rigTools Maya auto-rig script, built on maya.cmds and a ctrlUI_lib helper (createClav2, createSphere). The recoverable outline: duplicateChain() copies the selected joint chain into _ik/_fk/_scale chains; fkControllerCreator() builds, orients and locks FK controllers; ikChainBuild(), armIk() and legIK() create the ikHandles, a cube controller for the arm, a foot controller with twist/roll/toe attributes, and a sphere pole-vector controller; findPoleVector() places that pole vector by projecting the mid joint onto the start-to-end axis (credits https://vimeo.com/66015036); clavSel() adds an optional clavicle control; constraintFunc() wires parent constraints, blendColors nodes and set-driven keys off an FKIK_Mode switch locator to drive IK/FK weights and controller visibility; a cmds.formLayout UI exposes the Leg/Arm chain menu, a secondary-axis menu, a scale field with +1/+3 buttons, and blend/constraint/clavicle checkboxes.]
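One clearly recoverable piece of logic in those fragments is findPoleVector(): it projects the middle joint onto the start-to-end axis and pushes the pole-vector controller out along the perpendicular that remains. Below is a minimal, Maya-free sketch of that math, assuming plain (x, y, z) tuples in place of the script's cmds.xform() calls and OpenMaya MVectors; the function name, the default scale factor and the example joints are illustrative, not the script's own.

def pole_vector_position(start, mid, end, scale=10.0):
    """World-space position for a pole-vector controller.

    start/mid/end are (x, y, z) joint positions, e.g. shoulder/elbow/wrist.
    """
    start_end = [e - s for s, e in zip(start, end)]
    start_mid = [m - s for s, m in zip(start, mid)]
    # Project start->mid onto the start->end axis...
    dot = sum(a * b for a, b in zip(start_mid, start_end))
    proj = [dot / sum(a * a for a in start_end) * a for a in start_end]
    # ...keep only the perpendicular component, scale it, and offset the mid joint by it.
    arrow = [(sm - p) * scale for sm, p in zip(start_mid, proj)]
    return tuple(m + a for m, a in zip(mid, arrow))

if __name__ == "__main__":
    # A bent arm in the XZ plane: the controller lands out past the elbow at (5, 0, 22).
    print(pole_vector_position((0, 0, 0), (5, 0, 2), (10, 0, 0)))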
[ "{} scaled {} \".format(summary.copied, summary.scaled) difference = summary.todo - (summary.copied", "smooth, jobs, results, concurrency): for _ in range(concurrency): process =", "will be # useful, but WITHOUT ANY WARRANTY; without even", "smooth, source, target, concurrency) summarize(summary, concurrency) def handle_commandline(): parser =", "for _ in range(concurrency): process = multiprocessing.Process(target=worker, args=(size, smooth, jobs,", "finally: jobs.task_done() def add_jobs(source, target, jobs): for todo, name in", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU", "free software: you can redistribute it # and/or modify it", "scaled name\") Summary = collections.namedtuple(\"Summary\", \"todo copied scaled canceled\") def", "size))) newImage = oldImage.subsample(stride) newImage.save(targetImage) return Result(0, 1, targetImage) def", "results, concurrency) todo = add_jobs(source, target, jobs) try: jobs.join() except", "different\") if not os.path.exists(args.target): os.makedirs(target) return args.size, args.smooth, source, target,", "Image.Error as err: Qtrac.report(str(err), True) finally: jobs.task_done() def add_jobs(source, target,", "scaled image that fits the given dimension \" \"[default: %(default)d]\")", "WITHOUT ANY WARRANTY; without even the implied warranty of #", "\"timing) [default: %(default)d]\") parser.add_argument(\"-s\", \"--size\", default=400, type=int, help=\"make a scaled", "scaled, canceled) def create_processes(size, smooth, jobs, results, concurrency): for _", "General Public License as # published by the Free Software", "= summary.todo - (summary.copied + summary.scaled) if difference: message +=", "reserved. # This program or module is free software: you", "scale(size, smooth, source, target, concurrency) summarize(summary, concurrency) def handle_commandline(): parser", "source, target, concurrency) summarize(summary, concurrency) def handle_commandline(): parser = argparse.ArgumentParser()", "under the terms of the GNU General Public License as", "source, target, args.concurrency def scale(size, smooth, source, target, concurrency): canceled", "module is free software: you can redistribute it # and/or", "rights reserved. 
# This program or module is free software:", "while True: try: sourceImage, targetImage = jobs.get() try: result =", "is provided for # educational purposes and is distributed in", "smooth, jobs, results): while True: try: sourceImage, targetImage = jobs.get()", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "that it will be # useful, but WITHOUT ANY WARRANTY;", "os.path.exists(args.target): os.makedirs(target) return args.size, args.smooth, source, target, args.concurrency def scale(size,", "difference = summary.todo - (summary.copied + summary.scaled) if difference: message", "import collections import math import multiprocessing import os import sys", "you can redistribute it # and/or modify it under the", "{}\".format(\"copied\" if result.copied else \"scaled\", os.path.basename(result.name))) results.put(result) except Image.Error as", "False jobs = multiprocessing.JoinableQueue() results = multiprocessing.Queue() create_processes(size, smooth, jobs,", "on Windows Qtrac.report(\"canceling...\") canceled = True copied = scaled =", "size / oldImage.height) newImage = oldImage.scale(scale) else: stride = int(math.ceil(max(oldImage.width", "scaled .xpm images\") args = parser.parse_args() source = os.path.abspath(args.source) target", "oldImage.width <= size and oldImage.height <= size: oldImage.save(targetImage) return Result(1,", "the scaled .xpm images\") args = parser.parse_args() source = os.path.abspath(args.source)", "target, args.concurrency def scale(size, smooth, source, target, concurrency): canceled =", "# May not work on Windows Qtrac.report(\"canceling...\") canceled = True", "License as # published by the Free Software Foundation, either", "# License, or (at your option) any later version. It", "as err: Qtrac.report(str(err), True) finally: jobs.task_done() def add_jobs(source, target, jobs):", "= multiprocessing.JoinableQueue() results = multiprocessing.Queue() create_processes(size, smooth, jobs, results, concurrency)", "be different\") if not os.path.exists(args.target): os.makedirs(target) return args.size, args.smooth, source,", "[default: %(default)d]\") parser.add_argument(\"-s\", \"--size\", default=400, type=int, help=\"make a scaled image", "the concurrency (for debugging and \" \"timing) [default: %(default)d]\") parser.add_argument(\"-s\",", "<filename>pipng/imagescale-q-m.py #!/usr/bin/env python3 # Copyright © 2012-13 Qtrac Ltd. 
All", "sourceImage = os.path.join(source, name) targetImage = os.path.join(target, name) jobs.put((sourceImage, targetImage))", "collections import math import multiprocessing import os import sys import", "process.start() def worker(size, smooth, jobs, results): while True: try: sourceImage,", "concurrency = handle_commandline() Qtrac.report(\"starting...\") summary = scale(size, smooth, source, target,", "the terms of the GNU General Public License as #", "message += \"using {} processes\".format(concurrency) if summary.canceled: message += \"", "help=\"use smooth scaling (slow but good for text)\") parser.add_argument(\"source\", help=\"the", "result.copied else \"scaled\", os.path.basename(result.name))) results.put(result) except Image.Error as err: Qtrac.report(str(err),", "oldImage.height / size))) newImage = oldImage.subsample(stride) newImage.save(targetImage) return Result(0, 1,", "result = results.get_nowait() copied += result.copied scaled += result.scaled return", "of the GNU General Public License as # published by", "jobs, results, concurrency) todo = add_jobs(source, target, jobs) try: jobs.join()", "results.empty(): # Safe because all jobs have finished result =", "= results.get_nowait() copied += result.copied scaled += result.scaled return Summary(todo,", "if smooth: scale = min(size / oldImage.width, size / oldImage.height)", "name\") Summary = collections.namedtuple(\"Summary\", \"todo copied scaled canceled\") def main():", "results)) process.daemon = True process.start() def worker(size, smooth, jobs, results):", "= os.path.join(target, name) jobs.put((sourceImage, targetImage)) return todo def scale_one(size, smooth,", "if summary.canceled: message += \" [canceled]\" Qtrac.report(message) print() if __name__", "or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #", "even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "distributed in the hope that it will be # useful,", "a scaled image that fits the given dimension \" \"[default:", "version. 
It is provided for # educational purposes and is", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "target, concurrency = handle_commandline() Qtrac.report(\"starting...\") summary = scale(size, smooth, source,", "/ oldImage.width, size / oldImage.height) newImage = oldImage.scale(scale) else: stride", "Foundation, either version 3 of the # License, or (at", "+= \"skipped {} \".format(difference) message += \"using {} processes\".format(concurrency) if", "= \"copied {} scaled {} \".format(summary.copied, summary.scaled) difference = summary.todo", "os.makedirs(target) return args.size, args.smooth, source, target, args.concurrency def scale(size, smooth,", "if oldImage.width <= size and oldImage.height <= size: oldImage.save(targetImage) return", "os.path.basename(result.name))) results.put(result) except Image.Error as err: Qtrac.report(str(err), True) finally: jobs.task_done()", "oldImage.height) newImage = oldImage.scale(scale) else: stride = int(math.ceil(max(oldImage.width / size,", "- (summary.copied + summary.scaled) if difference: message += \"skipped {}", "parser.add_argument(\"-S\", \"--smooth\", action=\"store_true\", help=\"use smooth scaling (slow but good for", "import multiprocessing import os import sys import Image import Qtrac", "smooth, jobs, results)) process.daemon = True process.start() def worker(size, smooth,", "oldImage.scale(scale) else: stride = int(math.ceil(max(oldImage.width / size, oldImage.height / size)))", "/ oldImage.height) newImage = oldImage.scale(scale) else: stride = int(math.ceil(max(oldImage.width /", "targetImage) def summarize(summary, concurrency): message = \"copied {} scaled {}", "sys import Image import Qtrac Result = collections.namedtuple(\"Result\", \"copied scaled", "difference: message += \"skipped {} \".format(difference) message += \"using {}", "newImage.save(targetImage) return Result(0, 1, targetImage) def summarize(summary, concurrency): message =", "either version 3 of the # License, or (at your", "return Result(1, 0, targetImage) else: if smooth: scale = min(size", "be # useful, but WITHOUT ANY WARRANTY; without even the", "Qtrac.report(\"{} {}\".format(\"copied\" if result.copied else \"scaled\", os.path.basename(result.name))) results.put(result) except Image.Error", "os.path.abspath(args.target) if source == target: args.error(\"source and target must be", "math import multiprocessing import os import sys import Image import", "the original .xpm images\") parser.add_argument(\"target\", help=\"the directory for the scaled", "Qtrac.report(\"canceling...\") canceled = True copied = scaled = 0 while", "args.smooth, source, target, args.concurrency def scale(size, smooth, source, target, concurrency):", "source, target, concurrency): canceled = False jobs = multiprocessing.JoinableQueue() results", "any later version. 
It is provided for # educational purposes", "and oldImage.height <= size: oldImage.save(targetImage) return Result(1, 0, targetImage) else:", "args=(size, smooth, jobs, results)) process.daemon = True process.start() def worker(size,", "else: stride = int(math.ceil(max(oldImage.width / size, oldImage.height / size))) newImage", "\"copied scaled name\") Summary = collections.namedtuple(\"Summary\", \"todo copied scaled canceled\")", "{} \".format(difference) message += \"using {} processes\".format(concurrency) if summary.canceled: message", "\"--smooth\", action=\"store_true\", help=\"use smooth scaling (slow but good for text)\")", "\"todo copied scaled canceled\") def main(): size, smooth, source, target,", "\" \"[default: %(default)d]\") parser.add_argument(\"-S\", \"--smooth\", action=\"store_true\", help=\"use smooth scaling (slow", "argparse import collections import math import multiprocessing import os import", "default=multiprocessing.cpu_count(), help=\"specify the concurrency (for debugging and \" \"timing) [default:", "option) any later version. It is provided for # educational", "See the GNU # General Public License for more details.", "= oldImage.scale(scale) else: stride = int(math.ceil(max(oldImage.width / size, oldImage.height /", "name) jobs.put((sourceImage, targetImage)) return todo def scale_one(size, smooth, sourceImage, targetImage):", "= True copied = scaled = 0 while not results.empty():", "# This program or module is free software: you can", "oldImage = Image.from_file(sourceImage) if oldImage.width <= size and oldImage.height <=", "A PARTICULAR PURPOSE. See the GNU # General Public License", "try: result = scale_one(size, smooth, sourceImage, targetImage) Qtrac.report(\"{} {}\".format(\"copied\" if", "image that fits the given dimension \" \"[default: %(default)d]\") parser.add_argument(\"-S\",", "summary.canceled: message += \" [canceled]\" Qtrac.report(message) print() if __name__ ==", "True process.start() def worker(size, smooth, jobs, results): while True: try:", "= oldImage.subsample(stride) newImage.save(targetImage) return Result(0, 1, targetImage) def summarize(summary, concurrency):", "+= \" [canceled]\" Qtrac.report(message) print() if __name__ == \"__main__\": main()", "it # and/or modify it under the terms of the", "finished result = results.get_nowait() copied += result.copied scaled += result.scaled", "Summary(todo, copied, scaled, canceled) def create_processes(size, smooth, jobs, results, concurrency):", "text)\") parser.add_argument(\"source\", help=\"the directory containing the original .xpm images\") parser.add_argument(\"target\",", "smooth, sourceImage, targetImage) Qtrac.report(\"{} {}\".format(\"copied\" if result.copied else \"scaled\", os.path.basename(result.name)))", "modify it under the terms of the GNU General Public", "if source == target: args.error(\"source and target must be different\")", "scaled canceled\") def main(): size, smooth, source, target, concurrency =", "targetImage) Qtrac.report(\"{} {}\".format(\"copied\" if result.copied else \"scaled\", os.path.basename(result.name))) results.put(result) except", "in enumerate(os.listdir(source), start=1): sourceImage = os.path.join(source, name) targetImage = os.path.join(target,", "collections.namedtuple(\"Summary\", \"todo copied scaled canceled\") def main(): size, smooth, source,", "directory containing the original .xpm images\") parser.add_argument(\"target\", help=\"the directory for", "else: if smooth: scale = min(size / oldImage.width, size /", "try: jobs.join() except 
KeyboardInterrupt: # May not work on Windows", "source, target, concurrency = handle_commandline() Qtrac.report(\"starting...\") summary = scale(size, smooth,", "concurrency) summarize(summary, concurrency) def handle_commandline(): parser = argparse.ArgumentParser() parser.add_argument(\"-c\", \"--concurrency\",", "multiprocessing.JoinableQueue() results = multiprocessing.Queue() create_processes(size, smooth, jobs, results, concurrency) todo", "dimension \" \"[default: %(default)d]\") parser.add_argument(\"-S\", \"--smooth\", action=\"store_true\", help=\"use smooth scaling", "sourceImage, targetImage = jobs.get() try: result = scale_one(size, smooth, sourceImage,", "but good for text)\") parser.add_argument(\"source\", help=\"the directory containing the original", "concurrency): for _ in range(concurrency): process = multiprocessing.Process(target=worker, args=(size, smooth,", "for text)\") parser.add_argument(\"source\", help=\"the directory containing the original .xpm images\")", "the # License, or (at your option) any later version.", "/ size, oldImage.height / size))) newImage = oldImage.subsample(stride) newImage.save(targetImage) return", "= os.path.join(source, name) targetImage = os.path.join(target, name) jobs.put((sourceImage, targetImage)) return", "for the scaled .xpm images\") args = parser.parse_args() source =", "Result(0, 1, targetImage) def summarize(summary, concurrency): message = \"copied {}", "This program or module is free software: you can redistribute", "argparse.ArgumentParser() parser.add_argument(\"-c\", \"--concurrency\", type=int, default=multiprocessing.cpu_count(), help=\"specify the concurrency (for debugging", "work on Windows Qtrac.report(\"canceling...\") canceled = True copied = scaled", "target, jobs): for todo, name in enumerate(os.listdir(source), start=1): sourceImage =", "args.error(\"source and target must be different\") if not os.path.exists(args.target): os.makedirs(target)", "else \"scaled\", os.path.basename(result.name))) results.put(result) except Image.Error as err: Qtrac.report(str(err), True)", "Image.from_file(sourceImage) if oldImage.width <= size and oldImage.height <= size: oldImage.save(targetImage)", "not os.path.exists(args.target): os.makedirs(target) return args.size, args.smooth, source, target, args.concurrency def", "try: sourceImage, targetImage = jobs.get() try: result = scale_one(size, smooth,", "create_processes(size, smooth, jobs, results, concurrency) todo = add_jobs(source, target, jobs)", "your option) any later version. It is provided for #", "It is provided for # educational purposes and is distributed", "2012-13 Qtrac Ltd. All rights reserved. 
# This program or", "KeyboardInterrupt: # May not work on Windows Qtrac.report(\"canceling...\") canceled =", "target, concurrency): canceled = False jobs = multiprocessing.JoinableQueue() results =", "in the hope that it will be # useful, but", "size: oldImage.save(targetImage) return Result(1, 0, targetImage) else: if smooth: scale", "\"scaled\", os.path.basename(result.name))) results.put(result) except Image.Error as err: Qtrac.report(str(err), True) finally:", "def scale(size, smooth, source, target, concurrency): canceled = False jobs", "\"using {} processes\".format(concurrency) if summary.canceled: message += \" [canceled]\" Qtrac.report(message)", "targetImage = jobs.get() try: result = scale_one(size, smooth, sourceImage, targetImage)", "concurrency) todo = add_jobs(source, target, jobs) try: jobs.join() except KeyboardInterrupt:", "return Summary(todo, copied, scaled, canceled) def create_processes(size, smooth, jobs, results,", "good for text)\") parser.add_argument(\"source\", help=\"the directory containing the original .xpm", "oldImage.height <= size: oldImage.save(targetImage) return Result(1, 0, targetImage) else: if", "multiprocessing import os import sys import Image import Qtrac Result", "= Image.from_file(sourceImage) if oldImage.width <= size and oldImage.height <= size:", "summary.scaled) difference = summary.todo - (summary.copied + summary.scaled) if difference:", "the hope that it will be # useful, but WITHOUT", "FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General", "collections.namedtuple(\"Result\", \"copied scaled name\") Summary = collections.namedtuple(\"Summary\", \"todo copied scaled", "scaled += result.scaled return Summary(todo, copied, scaled, canceled) def create_processes(size,", "jobs.get() try: result = scale_one(size, smooth, sourceImage, targetImage) Qtrac.report(\"{} {}\".format(\"copied\"", "= scaled = 0 while not results.empty(): # Safe because", "# published by the Free Software Foundation, either version 3", "jobs.task_done() def add_jobs(source, target, jobs): for todo, name in enumerate(os.listdir(source),", "canceled) def create_processes(size, smooth, jobs, results, concurrency): for _ in", "+= result.scaled return Summary(todo, copied, scaled, canceled) def create_processes(size, smooth,", "parser.add_argument(\"source\", help=\"the directory containing the original .xpm images\") parser.add_argument(\"target\", help=\"the", "Result = collections.namedtuple(\"Result\", \"copied scaled name\") Summary = collections.namedtuple(\"Summary\", \"todo", "concurrency): message = \"copied {} scaled {} \".format(summary.copied, summary.scaled) difference", "more details. 
import argparse import collections import math import multiprocessing", "= os.path.abspath(args.source) target = os.path.abspath(args.target) if source == target: args.error(\"source", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "def add_jobs(source, target, jobs): for todo, name in enumerate(os.listdir(source), start=1):", "software: you can redistribute it # and/or modify it under", "source = os.path.abspath(args.source) target = os.path.abspath(args.target) if source == target:", "is free software: you can redistribute it # and/or modify", "name in enumerate(os.listdir(source), start=1): sourceImage = os.path.join(source, name) targetImage =", "def summarize(summary, concurrency): message = \"copied {} scaled {} \".format(summary.copied,", "not results.empty(): # Safe because all jobs have finished result", "<= size: oldImage.save(targetImage) return Result(1, 0, targetImage) else: if smooth:", "scaled = 0 while not results.empty(): # Safe because all", "process.daemon = True process.start() def worker(size, smooth, jobs, results): while", "oldImage.width, size / oldImage.height) newImage = oldImage.scale(scale) else: stride =", "start=1): sourceImage = os.path.join(source, name) targetImage = os.path.join(target, name) jobs.put((sourceImage,", "containing the original .xpm images\") parser.add_argument(\"target\", help=\"the directory for the", "import sys import Image import Qtrac Result = collections.namedtuple(\"Result\", \"copied", "canceled\") def main(): size, smooth, source, target, concurrency = handle_commandline()", "parser.add_argument(\"-s\", \"--size\", default=400, type=int, help=\"make a scaled image that fits", "the given dimension \" \"[default: %(default)d]\") parser.add_argument(\"-S\", \"--smooth\", action=\"store_true\", help=\"use", "it under the terms of the GNU General Public License", "process = multiprocessing.Process(target=worker, args=(size, smooth, jobs, results)) process.daemon = True", "redistribute it # and/or modify it under the terms of", "and target must be different\") if not os.path.exists(args.target): os.makedirs(target) return", "May not work on Windows Qtrac.report(\"canceling...\") canceled = True copied", "os import sys import Image import Qtrac Result = collections.namedtuple(\"Result\",", "smooth, sourceImage, targetImage): oldImage = Image.from_file(sourceImage) if oldImage.width <= size", "canceled = True copied = scaled = 0 while not", "oldImage.save(targetImage) return Result(1, 0, targetImage) else: if smooth: scale =", "summarize(summary, concurrency) def handle_commandline(): parser = argparse.ArgumentParser() parser.add_argument(\"-c\", \"--concurrency\", type=int,", "message += \" [canceled]\" Qtrac.report(message) print() if __name__ == \"__main__\":", "targetImage = os.path.join(target, name) jobs.put((sourceImage, targetImage)) return todo def scale_one(size,", "parser.parse_args() source = os.path.abspath(args.source) target = os.path.abspath(args.target) if source ==", "Image import Qtrac Result = collections.namedtuple(\"Result\", \"copied scaled name\") Summary", "enumerate(os.listdir(source), start=1): sourceImage = os.path.join(source, name) targetImage = os.path.join(target, name)", "processes\".format(concurrency) if summary.canceled: message += \" [canceled]\" Qtrac.report(message) print() if", "jobs have finished result = results.get_nowait() copied += result.copied scaled", "later version. 
It is provided for # educational purposes and", "Qtrac.report(str(err), True) finally: jobs.task_done() def add_jobs(source, target, jobs): for todo,", "Qtrac Result = collections.namedtuple(\"Result\", \"copied scaled name\") Summary = collections.namedtuple(\"Summary\",", "GNU General Public License as # published by the Free", "def main(): size, smooth, source, target, concurrency = handle_commandline() Qtrac.report(\"starting...\")", "0, targetImage) else: if smooth: scale = min(size / oldImage.width,", "sourceImage, targetImage): oldImage = Image.from_file(sourceImage) if oldImage.width <= size and", "= int(math.ceil(max(oldImage.width / size, oldImage.height / size))) newImage = oldImage.subsample(stride)", "multiprocessing.Queue() create_processes(size, smooth, jobs, results, concurrency) todo = add_jobs(source, target,", "name) targetImage = os.path.join(target, name) jobs.put((sourceImage, targetImage)) return todo def", "it will be # useful, but WITHOUT ANY WARRANTY; without", "add_jobs(source, target, jobs) try: jobs.join() except KeyboardInterrupt: # May not", "summary.scaled) if difference: message += \"skipped {} \".format(difference) message +=", "target, concurrency) summarize(summary, concurrency) def handle_commandline(): parser = argparse.ArgumentParser() parser.add_argument(\"-c\",", "newImage = oldImage.scale(scale) else: stride = int(math.ceil(max(oldImage.width / size, oldImage.height", "is distributed in the hope that it will be #", "not work on Windows Qtrac.report(\"canceling...\") canceled = True copied =", "summary.todo - (summary.copied + summary.scaled) if difference: message += \"skipped", "sourceImage, targetImage) Qtrac.report(\"{} {}\".format(\"copied\" if result.copied else \"scaled\", os.path.basename(result.name))) results.put(result)", "result.copied scaled += result.scaled return Summary(todo, copied, scaled, canceled) def", "= True process.start() def worker(size, smooth, jobs, results): while True:", "for more details. import argparse import collections import math import", "# General Public License for more details. import argparse import", "%(default)d]\") parser.add_argument(\"-s\", \"--size\", default=400, type=int, help=\"make a scaled image that", "\".format(difference) message += \"using {} processes\".format(concurrency) if summary.canceled: message +=", "oldImage.subsample(stride) newImage.save(targetImage) return Result(0, 1, targetImage) def summarize(summary, concurrency): message", "import os import sys import Image import Qtrac Result =", "directory for the scaled .xpm images\") args = parser.parse_args() source", "# useful, but WITHOUT ANY WARRANTY; without even the implied", "help=\"the directory containing the original .xpm images\") parser.add_argument(\"target\", help=\"the directory", "License, or (at your option) any later version. 
It is", "scale_one(size, smooth, sourceImage, targetImage) Qtrac.report(\"{} {}\".format(\"copied\" if result.copied else \"scaled\",", "targetImage) else: if smooth: scale = min(size / oldImage.width, size", "smooth, jobs, results, concurrency) todo = add_jobs(source, target, jobs) try:", "as # published by the Free Software Foundation, either version", "Windows Qtrac.report(\"canceling...\") canceled = True copied = scaled = 0", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "results, concurrency): for _ in range(concurrency): process = multiprocessing.Process(target=worker, args=(size,", "todo def scale_one(size, smooth, sourceImage, targetImage): oldImage = Image.from_file(sourceImage) if", "default=400, type=int, help=\"make a scaled image that fits the given", "License for more details. import argparse import collections import math", "size, oldImage.height / size))) newImage = oldImage.subsample(stride) newImage.save(targetImage) return Result(0,", "scale_one(size, smooth, sourceImage, targetImage): oldImage = Image.from_file(sourceImage) if oldImage.width <=", "All rights reserved. # This program or module is free", "the GNU General Public License as # published by the", "Public License for more details. import argparse import collections import", "worker(size, smooth, jobs, results): while True: try: sourceImage, targetImage =", "(at your option) any later version. It is provided for", "target: args.error(\"source and target must be different\") if not os.path.exists(args.target):", "def handle_commandline(): parser = argparse.ArgumentParser() parser.add_argument(\"-c\", \"--concurrency\", type=int, default=multiprocessing.cpu_count(), help=\"specify", ".xpm images\") parser.add_argument(\"target\", help=\"the directory for the scaled .xpm images\")", "canceled = False jobs = multiprocessing.JoinableQueue() results = multiprocessing.Queue() create_processes(size,", "True: try: sourceImage, targetImage = jobs.get() try: result = scale_one(size,", "create_processes(size, smooth, jobs, results, concurrency): for _ in range(concurrency): process", "concurrency (for debugging and \" \"timing) [default: %(default)d]\") parser.add_argument(\"-s\", \"--size\",", "= scale_one(size, smooth, sourceImage, targetImage) Qtrac.report(\"{} {}\".format(\"copied\" if result.copied else", "result = scale_one(size, smooth, sourceImage, targetImage) Qtrac.report(\"{} {}\".format(\"copied\" if result.copied", "Free Software Foundation, either version 3 of the # License,", "program or module is free software: you can redistribute it", "scale(size, smooth, source, target, concurrency): canceled = False jobs =", "that fits the given dimension \" \"[default: %(default)d]\") parser.add_argument(\"-S\", \"--smooth\",", "in range(concurrency): process = multiprocessing.Process(target=worker, args=(size, smooth, jobs, results)) process.daemon", "for todo, name in enumerate(os.listdir(source), start=1): sourceImage = os.path.join(source, name)", "target, jobs) try: jobs.join() except KeyboardInterrupt: # May not work", "return args.size, args.smooth, source, target, args.concurrency def scale(size, smooth, source,", "= multiprocessing.Queue() create_processes(size, smooth, jobs, results, concurrency) todo = add_jobs(source,", "args = parser.parse_args() source = os.path.abspath(args.source) target = os.path.abspath(args.target) if", "_ in range(concurrency): process = multiprocessing.Process(target=worker, args=(size, smooth, jobs, results))", "for # educational purposes and is 
distributed in the hope", "original .xpm images\") parser.add_argument(\"target\", help=\"the directory for the scaled .xpm", "have finished result = results.get_nowait() copied += result.copied scaled +=", "educational purposes and is distributed in the hope that it", "# and/or modify it under the terms of the GNU", "handle_commandline() Qtrac.report(\"starting...\") summary = scale(size, smooth, source, target, concurrency) summarize(summary,", "message = \"copied {} scaled {} \".format(summary.copied, summary.scaled) difference =", "fits the given dimension \" \"[default: %(default)d]\") parser.add_argument(\"-S\", \"--smooth\", action=\"store_true\",", "size, smooth, source, target, concurrency = handle_commandline() Qtrac.report(\"starting...\") summary =", "Summary = collections.namedtuple(\"Summary\", \"todo copied scaled canceled\") def main(): size,", "os.path.join(source, name) targetImage = os.path.join(target, name) jobs.put((sourceImage, targetImage)) return todo", "published by the Free Software Foundation, either version 3 of", "the GNU # General Public License for more details. import", "Copyright © 2012-13 Qtrac Ltd. All rights reserved. # This", "parser.add_argument(\"target\", help=\"the directory for the scaled .xpm images\") args =", "concurrency): canceled = False jobs = multiprocessing.JoinableQueue() results = multiprocessing.Queue()", "args.size, args.smooth, source, target, args.concurrency def scale(size, smooth, source, target,", "smooth: scale = min(size / oldImage.width, size / oldImage.height) newImage", "the Free Software Foundation, either version 3 of the #", "version 3 of the # License, or (at your option)", "except Image.Error as err: Qtrac.report(str(err), True) finally: jobs.task_done() def add_jobs(source,", "or module is free software: you can redistribute it #", "\"--size\", default=400, type=int, help=\"make a scaled image that fits the", "results): while True: try: sourceImage, targetImage = jobs.get() try: result", "scale = min(size / oldImage.width, size / oldImage.height) newImage =", "= collections.namedtuple(\"Result\", \"copied scaled name\") Summary = collections.namedtuple(\"Summary\", \"todo copied", "results.get_nowait() copied += result.copied scaled += result.scaled return Summary(todo, copied,", "= collections.namedtuple(\"Summary\", \"todo copied scaled canceled\") def main(): size, smooth,", "\"skipped {} \".format(difference) message += \"using {} processes\".format(concurrency) if summary.canceled:", "GNU # General Public License for more details. 
import argparse", "jobs) try: jobs.join() except KeyboardInterrupt: # May not work on", "int(math.ceil(max(oldImage.width / size, oldImage.height / size))) newImage = oldImage.subsample(stride) newImage.save(targetImage)", "type=int, help=\"make a scaled image that fits the given dimension", "help=\"specify the concurrency (for debugging and \" \"timing) [default: %(default)d]\")", "copied, scaled, canceled) def create_processes(size, smooth, jobs, results, concurrency): for", "jobs = multiprocessing.JoinableQueue() results = multiprocessing.Queue() create_processes(size, smooth, jobs, results,", "jobs.put((sourceImage, targetImage)) return todo def scale_one(size, smooth, sourceImage, targetImage): oldImage", "images\") parser.add_argument(\"target\", help=\"the directory for the scaled .xpm images\") args", "todo = add_jobs(source, target, jobs) try: jobs.join() except KeyboardInterrupt: #", "Safe because all jobs have finished result = results.get_nowait() copied", "target = os.path.abspath(args.target) if source == target: args.error(\"source and target", "# Safe because all jobs have finished result = results.get_nowait()", "results.put(result) except Image.Error as err: Qtrac.report(str(err), True) finally: jobs.task_done() def", "and/or modify it under the terms of the GNU General", "0 while not results.empty(): # Safe because all jobs have", "source == target: args.error(\"source and target must be different\") if", "size and oldImage.height <= size: oldImage.save(targetImage) return Result(1, 0, targetImage)", "= 0 while not results.empty(): # Safe because all jobs", "\"--concurrency\", type=int, default=multiprocessing.cpu_count(), help=\"specify the concurrency (for debugging and \"", "scaled {} \".format(summary.copied, summary.scaled) difference = summary.todo - (summary.copied +", "# Copyright © 2012-13 Qtrac Ltd. All rights reserved. #", "import Qtrac Result = collections.namedtuple(\"Result\", \"copied scaled name\") Summary =", "= multiprocessing.Process(target=worker, args=(size, smooth, jobs, results)) process.daemon = True process.start()", "and is distributed in the hope that it will be", "\".format(summary.copied, summary.scaled) difference = summary.todo - (summary.copied + summary.scaled) if", "#!/usr/bin/env python3 # Copyright © 2012-13 Qtrac Ltd. All rights", "jobs, results): while True: try: sourceImage, targetImage = jobs.get() try:", "= os.path.abspath(args.target) if source == target: args.error(\"source and target must", "PURPOSE. See the GNU # General Public License for more", "handle_commandline(): parser = argparse.ArgumentParser() parser.add_argument(\"-c\", \"--concurrency\", type=int, default=multiprocessing.cpu_count(), help=\"specify the", "args.concurrency def scale(size, smooth, source, target, concurrency): canceled = False", "if difference: message += \"skipped {} \".format(difference) message += \"using", "details. import argparse import collections import math import multiprocessing import", "© 2012-13 Qtrac Ltd. All rights reserved. 
# This program", "+ summary.scaled) if difference: message += \"skipped {} \".format(difference) message", "terms of the GNU General Public License as # published", "return todo def scale_one(size, smooth, sourceImage, targetImage): oldImage = Image.from_file(sourceImage)", "+= result.copied scaled += result.scaled return Summary(todo, copied, scaled, canceled)", "help=\"make a scaled image that fits the given dimension \"", "smooth, source, target, concurrency): canceled = False jobs = multiprocessing.JoinableQueue()", "Public License as # published by the Free Software Foundation,", "main(): size, smooth, source, target, concurrency = handle_commandline() Qtrac.report(\"starting...\") summary", "summarize(summary, concurrency): message = \"copied {} scaled {} \".format(summary.copied, summary.scaled)", "<= size and oldImage.height <= size: oldImage.save(targetImage) return Result(1, 0,", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "os.path.abspath(args.source) target = os.path.abspath(args.target) if source == target: args.error(\"source and", "return Result(0, 1, targetImage) def summarize(summary, concurrency): message = \"copied", "useful, but WITHOUT ANY WARRANTY; without even the implied warranty", "+= \"using {} processes\".format(concurrency) if summary.canceled: message += \" [canceled]\"", "purposes and is distributed in the hope that it will", "if not os.path.exists(args.target): os.makedirs(target) return args.size, args.smooth, source, target, args.concurrency", "parser = argparse.ArgumentParser() parser.add_argument(\"-c\", \"--concurrency\", type=int, default=multiprocessing.cpu_count(), help=\"specify the concurrency", "/ size))) newImage = oldImage.subsample(stride) newImage.save(targetImage) return Result(0, 1, targetImage)", "targetImage): oldImage = Image.from_file(sourceImage) if oldImage.width <= size and oldImage.height", "= parser.parse_args() source = os.path.abspath(args.source) target = os.path.abspath(args.target) if source", "True copied = scaled = 0 while not results.empty(): #", "but WITHOUT ANY WARRANTY; without even the implied warranty of", "summary = scale(size, smooth, source, target, concurrency) summarize(summary, concurrency) def", "= argparse.ArgumentParser() parser.add_argument(\"-c\", \"--concurrency\", type=int, default=multiprocessing.cpu_count(), help=\"specify the concurrency (for", "def worker(size, smooth, jobs, results): while True: try: sourceImage, targetImage", "Result(1, 0, targetImage) else: if smooth: scale = min(size /", "can redistribute it # and/or modify it under the terms", "Ltd. All rights reserved. # This program or module is", "all jobs have finished result = results.get_nowait() copied += result.copied", "result.scaled return Summary(todo, copied, scaled, canceled) def create_processes(size, smooth, jobs,", "images\") args = parser.parse_args() source = os.path.abspath(args.source) target = os.path.abspath(args.target)", "provided for # educational purposes and is distributed in the", "newImage = oldImage.subsample(stride) newImage.save(targetImage) return Result(0, 1, targetImage) def summarize(summary,", "smooth scaling (slow but good for text)\") parser.add_argument(\"source\", help=\"the directory", "\"[default: %(default)d]\") parser.add_argument(\"-S\", \"--smooth\", action=\"store_true\", help=\"use smooth scaling (slow but", "or (at your option) any later version. 
It is provided", "= scale(size, smooth, source, target, concurrency) summarize(summary, concurrency) def handle_commandline():", "copied = scaled = 0 while not results.empty(): # Safe", "1, targetImage) def summarize(summary, concurrency): message = \"copied {} scaled", "def scale_one(size, smooth, sourceImage, targetImage): oldImage = Image.from_file(sourceImage) if oldImage.width", "min(size / oldImage.width, size / oldImage.height) newImage = oldImage.scale(scale) else:", "Qtrac.report(\"starting...\") summary = scale(size, smooth, source, target, concurrency) summarize(summary, concurrency)", "python3 # Copyright © 2012-13 Qtrac Ltd. All rights reserved.", "3 of the # License, or (at your option) any", "while not results.empty(): # Safe because all jobs have finished", "except KeyboardInterrupt: # May not work on Windows Qtrac.report(\"canceling...\") canceled", "import Image import Qtrac Result = collections.namedtuple(\"Result\", \"copied scaled name\")", "jobs.join() except KeyboardInterrupt: # May not work on Windows Qtrac.report(\"canceling...\")", "parser.add_argument(\"-c\", \"--concurrency\", type=int, default=multiprocessing.cpu_count(), help=\"specify the concurrency (for debugging and", "os.path.join(target, name) jobs.put((sourceImage, targetImage)) return todo def scale_one(size, smooth, sourceImage,", "type=int, default=multiprocessing.cpu_count(), help=\"specify the concurrency (for debugging and \" \"timing)", "%(default)d]\") parser.add_argument(\"-S\", \"--smooth\", action=\"store_true\", help=\"use smooth scaling (slow but good", "def create_processes(size, smooth, jobs, results, concurrency): for _ in range(concurrency):", "jobs): for todo, name in enumerate(os.listdir(source), start=1): sourceImage = os.path.join(source,", "= add_jobs(source, target, jobs) try: jobs.join() except KeyboardInterrupt: # May", "action=\"store_true\", help=\"use smooth scaling (slow but good for text)\") parser.add_argument(\"source\",", ".xpm images\") args = parser.parse_args() source = os.path.abspath(args.source) target =", "copied scaled canceled\") def main(): size, smooth, source, target, concurrency", "== target: args.error(\"source and target must be different\") if not", "given dimension \" \"[default: %(default)d]\") parser.add_argument(\"-S\", \"--smooth\", action=\"store_true\", help=\"use smooth", "= min(size / oldImage.width, size / oldImage.height) newImage = oldImage.scale(scale)", "by the Free Software Foundation, either version 3 of the", "(for debugging and \" \"timing) [default: %(default)d]\") parser.add_argument(\"-s\", \"--size\", default=400,", "\" \"timing) [default: %(default)d]\") parser.add_argument(\"-s\", \"--size\", default=400, type=int, help=\"make a", "todo, name in enumerate(os.listdir(source), start=1): sourceImage = os.path.join(source, name) targetImage", "results = multiprocessing.Queue() create_processes(size, smooth, jobs, results, concurrency) todo =", "range(concurrency): process = multiprocessing.Process(target=worker, args=(size, smooth, jobs, results)) process.daemon =", "add_jobs(source, target, jobs): for todo, name in enumerate(os.listdir(source), start=1): sourceImage", "multiprocessing.Process(target=worker, args=(size, smooth, jobs, results)) process.daemon = True process.start() def", "(summary.copied + summary.scaled) if difference: message += \"skipped {} \".format(difference)", "import math import multiprocessing import os import sys import Image", "debugging and \" \"timing) [default: %(default)d]\") parser.add_argument(\"-s\", 
\"--size\", default=400, type=int,", "scaling (slow but good for text)\") parser.add_argument(\"source\", help=\"the directory containing", "copied += result.copied scaled += result.scaled return Summary(todo, copied, scaled,", "jobs, results, concurrency): for _ in range(concurrency): process = multiprocessing.Process(target=worker,", "= False jobs = multiprocessing.JoinableQueue() results = multiprocessing.Queue() create_processes(size, smooth,", "because all jobs have finished result = results.get_nowait() copied +=", "smooth, source, target, concurrency = handle_commandline() Qtrac.report(\"starting...\") summary = scale(size,", "help=\"the directory for the scaled .xpm images\") args = parser.parse_args()", "import argparse import collections import math import multiprocessing import os", "must be different\") if not os.path.exists(args.target): os.makedirs(target) return args.size, args.smooth,", "General Public License for more details. import argparse import collections", "{} processes\".format(concurrency) if summary.canceled: message += \" [canceled]\" Qtrac.report(message) print()", "= handle_commandline() Qtrac.report(\"starting...\") summary = scale(size, smooth, source, target, concurrency)", "FOR A PARTICULAR PURPOSE. See the GNU # General Public", "\"copied {} scaled {} \".format(summary.copied, summary.scaled) difference = summary.todo -", "Software Foundation, either version 3 of the # License, or", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or", "and \" \"timing) [default: %(default)d]\") parser.add_argument(\"-s\", \"--size\", default=400, type=int, help=\"make", "= jobs.get() try: result = scale_one(size, smooth, sourceImage, targetImage) Qtrac.report(\"{}", "# educational purposes and is distributed in the hope that", "PARTICULAR PURPOSE. See the GNU # General Public License for", "target must be different\") if not os.path.exists(args.target): os.makedirs(target) return args.size,", "of the # License, or (at your option) any later", "if result.copied else \"scaled\", os.path.basename(result.name))) results.put(result) except Image.Error as err:", "jobs, results)) process.daemon = True process.start() def worker(size, smooth, jobs,", "hope that it will be # useful, but WITHOUT ANY", "stride = int(math.ceil(max(oldImage.width / size, oldImage.height / size))) newImage =", "concurrency) def handle_commandline(): parser = argparse.ArgumentParser() parser.add_argument(\"-c\", \"--concurrency\", type=int, default=multiprocessing.cpu_count(),", "Qtrac Ltd. All rights reserved. # This program or module", "{} \".format(summary.copied, summary.scaled) difference = summary.todo - (summary.copied + summary.scaled)", "targetImage)) return todo def scale_one(size, smooth, sourceImage, targetImage): oldImage =", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "message += \"skipped {} \".format(difference) message += \"using {} processes\".format(concurrency)", "err: Qtrac.report(str(err), True) finally: jobs.task_done() def add_jobs(source, target, jobs): for", "True) finally: jobs.task_done() def add_jobs(source, target, jobs): for todo, name", "without even the implied warranty of # MERCHANTABILITY or FITNESS", "(slow but good for text)\") parser.add_argument(\"source\", help=\"the directory containing the" ]
n-gram shards of an Azul (DataBiosphere) integration test module. Recoverable structure: IntegrationTestCase and IndexingIntegrationTest (catalog indexing with deliberately duplicated notifications, manifest generation in compact, full, and terra.bdbag formats, repository file fetching with Retry-After handling, DOS and DRS resolution, FASTQ content validation, and catalog completeness/emptiness assertions), AzulClientIntegrationTest (notification error handling), PortalRegistrationIntegrationTest (concurrent portal-database CRUD, skipped pending https://github.com/DataBiosphere/azul/issues/2399), OpenAPIIntegrationTest (OpenAPI spec validation), DSSIntegrationTest (direct-access and fallback DSS clients, MiniDSS failure modes), and AzulChaliceLocalIntegrationTest (local Chalice health and index endpoints).
purge_queues=not config.is_stable_deployment(), delete_indices=True, create_indices=True) def _test_other_endpoints(self): service_paths =", "self.assertRaises(self.SpecialError): mini_dss._get_file_object(uuid, version) with self.assertRaises(KeyError): mini_dss._get_blob_key({}) with self.assertRaises(self.SpecialError): mini_dss._get_blob('/blobs/foo', {'content-type':", "['Homo sapiens']}} url = self.url.copy().set(path='index/files', query=dict(filters=json.dumps(filters), catalog=self.catalog)).url response = requests.get(url)", "<filename>test/integration_test.py from abc import ( ABCMeta, ) from concurrent.futures.thread import", "times. notifications.extend(random.choices(notifications, k=num_duplicates)) return notifications def _wait_for_indexer(): num_bundles = sum(catalog.num_bundles", "OpenAPIIntegrationTest(AzulTestCase): def test_openapi(self): service = config.service_endpoint() response = requests.get(service +", "version, 'aws') with self.assertRaises(self.SpecialError): mini_dss.get_file(uuid, version, 'aws') with self.assertRaises(self.SpecialError): mini_dss.get_native_file_url(uuid,", "in the input list. bundle_fqids = sorted(bundle_fqids) random_.shuffle(bundle_fqids) # Pick", "break elif len(indexed_fqids) > num_bundles: log.error('Found %i bundles, more than", "url = body['pagination']['next'] if url is None: break return entities", "self.assertIn('s3_etag', f) # Extract the log method name and the", "self.assertRaises(AzulClientNotificationError, self.azul_client.index, first(config.integration_test_catalogs), notifications) class PortalRegistrationIntegrationTest(IntegrationTestCase): # FIXME: Re-enable once", "with project metadata. filtered_bundle_fqids = [] for bundle_fqid in bundle_fqids:", "(response.reason, response.content)) def _check_manifest(self, _catalog: CatalogName, response: bytes): self.__check_manifest(BytesIO(response), 'bundle_uuid')", "deterministic order in the input list. 
bundle_fqids = sorted(bundle_fqids) random_.shuffle(bundle_fqids)", "-> bytes: return self._get_url(url).content def _get_url(self, url: str, allow_redirects=True) ->", "0) self.assertGreater(set(f['name'] for f in manifest), set(metadata.keys())) for f in", ") import azul.dss from azul.es import ( ESClientFactory, ) from", "= 2 max_bundles = 64 min_timeout = 20 * 60", "'application/json'}) with self.assertRaises(self.SpecialError): mini_dss.get_bundle(uuid, version, 'aws') with self.assertRaises(self.SpecialError): mini_dss.get_file(uuid, version,", "-> None: super().setUp() self.pruning_seed = random.randint(0, sys.maxsize) @contextmanager def subTest(self,", "= self.azul_client.list_bundles(catalog) bundle_fqids = self._prune_test_bundles(catalog, bundle_fqids, self.max_bundles) return { bundle_fqid:", "Terra doesn't allow colons in this column, but they may", "import ( furl, ) from google.cloud import ( storage, )", "params) log.info('Request %i/%i took %.3fs to execute.', attempt + 1,", "self._get_url_content(response['Location']) self._validate_fastq_content(content) def _test_drs(self, catalog: CatalogName, file_uuid: str): repository_plugin =", "Dict[BundleFQID, JSON]: bundle_fqids = self.azul_client.list_bundles(catalog) bundle_fqids = self._prune_test_bundles(catalog, bundle_fqids, self.max_bundles)", "indexed_fqids = set() log.debug('Expecting bundles %s ', sorted(expected_fqids)) retries =", "first(config.integration_test_catalogs), notifications) class PortalRegistrationIntegrationTest(IntegrationTestCase): # FIXME: Re-enable once overloading of", "allow_redirects=allow_redirects) expected_statuses = (200,) if allow_redirects else (200, 301, 302)", "catalog, the queues are # shared by all catalogs and", "c in header.keys() if c.endswith(suffix) ] size, drs_uri, name =", "= response.json() validate_spec(spec) class DSSIntegrationTest(AzulTestCase): def test_patched_dss_client(self): query = {", "start = time.time() response = self._check_endpoint(config.service_endpoint(), '/manifest/files', params) log.info('Request %i/%i", "log calls: %r', captured_log.mock_calls) self.assertGreater(len(metadata), 0) self.assertGreater(set(f['name'] for f in", "indexed by TDR self.assertNotIn(':', row['entity:participant_id']) suffix = '__file_drs_uri' header, *rows", "furl(endpoint, path=path, query=query) return self._get_url_content(url.url) def _get_url_content(self, url: str) ->", "url: str) -> bytes: return self._get_url(url).content def _get_url(self, url: str,", "is None: break return entities def _assert_indices_exist(self, catalog: CatalogName): \"\"\"", "if access.method is AccessMethod.https: content = self._get_url_content(access.url) elif access.method is", "= azul.dss.direct_access_client() self._test_dss_client(direct, query, dss_client, replica, fallback=True) dss_client = azul.dss.direct_access_client()", "!= 302: self.assertEqual(301, response['Status']) response = self._get_url(response['Location']).json() content = self._get_url_content(response['Location'])", "from typing import ( AbstractSet, Any, Dict, IO, List, Mapping,", "import csv from functools import ( lru_cache, ) import gzip", "# Extract the log method name and the first three", "catalog: CatalogName, bundle_fqids: Sequence[BundleFQID], max_bundles: int ) -> List[BundleFQID]: seed", "entity_type=entity_type): hits = self._get_entities(catalog, entity_type) self.assertEqual([], [hit['entryId'] for hit in", "( '/', '/openapi', '/version', '/index/summary', '/index/files/order', ) service_routes = (", 
"notifications = [invalid_notification] self.assertRaises(AzulClientNotificationError, self.azul_client.index, first(config.integration_test_catalogs), notifications) class PortalRegistrationIntegrationTest(IntegrationTestCase): #", "self.assertTrue(es_client.indices.exists(index_name)) class AzulClientIntegrationTest(IntegrationTestCase): def test_azul_client_error_handling(self): invalid_notification = {} notifications =", "multiple times so # some notifications will end up being", "config.service_endpoint(), config.indexer_endpoint() ) health_paths = ( '', # default keys", "notifications = list(self.notifications.values()) # Index some bundles again to test", "_test_repository_files(self, catalog: str): with self.subTest('repository_files', catalog=catalog): file_uuid = self._get_one_file_uuid(catalog) response", "catalog, size) plugin = self.azul_client.repository_plugin(catalog) drs_client = plugin.drs_client() access =", "load_app_module, ) from azul.portal_service import ( PortalService, ) from azul.requests", "( lru_cache, ) import gzip from io import ( BytesIO,", "manifest), set(metadata.keys())) for f in manifest: self.assertIn('s3_etag', f) # Extract", "} def _prune_test_bundles(self, catalog: CatalogName, bundle_fqids: Sequence[BundleFQID], max_bundles: int )", "def _assert_catalog_complete(self, catalog: CatalogName, entity_type: str, bundle_fqids: AbstractSet[BundleFQID]) -> None:", ") import uuid from zipfile import ( ZipFile, ) import", "app_module = load_app_module('service') app_dir = os.path.dirname(app_module.__file__) factory = chalice.cli.factory.CLIFactory(app_dir) config", "if direct else azul.dss.client() with self.assertRaises(SwaggerAPIException) as e: dss_client.get_file(uuid='acafefed-beef-4bad-babe-feedfa11afe1', version='2018-11-19T232756.056947Z',", "from google.oauth2 import ( service_account, ) from hca.dss import (", "= {'fileFormat': {'is': ['fastq.gz', 'fastq']}} response = self._check_endpoint(endpoint=config.service_endpoint(), path='/index/files', query=dict(catalog=catalog,", "furl(scheme='http', host='127.0.0.1', port=8000) server = None server_thread = None @classmethod", "is None for f in futures)) new_db = portal_service.read() old_entries", "num_duplicates = self.num_bundles // 2 notifications = list(self.notifications.values()) # Index", "%i bundles in under %i seconds.', len(indexed_fqids), num_bundles, timeout) break", "response = self._check_endpoint(config.service_endpoint(), '/manifest/files', params) log.info('Request %i/%i took %.3fs to", "else: retries += 1 time.sleep(5) self.assertSetEqual(indexed_fqids, expected_fqids) entity_types = ['files',", "str, query: Optional[Mapping[str, Any]] = None) -> bytes: query =", "it, like Azul's DRS proxy of DSS. 
for method in", "'Failed getting bundle') ] + [ ('debug', 'Loading file %s'),", "CatalogName, response: bytes): with ZipFile(BytesIO(response)) as zip_fh: data_path = os.path.join(os.path.dirname(first(zip_fh.namelist())),", "hits = self._get_entities(catalog, entity_type) self.assertEqual([], [hit['entryId'] for hit in hits])", "logging.getLogger(__name__) # noinspection PyPep8Naming def setUpModule(): configure_test_logging(log) class IntegrationTestCase(AzulTestCase, metaclass=ABCMeta):", "mock, ) import uuid from zipfile import ( ZipFile, )", "erase the integration test catalog, the queues are # shared", "str(uuid.UUID(bundle_uuid))) return rows def _test_repository_files(self, catalog: str): with self.subTest('repository_files', catalog=catalog):", "expected %i bundles.', num_bundles) break elif len(indexed_fqids) > num_bundles: log.error('Found", "hits = self._get_entities(catalog, entity_type) indexed_fqids.update( BundleFQID(bundle['bundleUuid'], bundle['bundleVersion']) for hit in", "with DRS using %r', file_uuid, access_method) drs_uri = f'drs://{config.api_lambda_domain(\"service\")}/{file_uuid}' access", "self.assertEqual('https', furl(access.url).scheme) # Try HEAD first because it's more efficient,", "= buf.read(1024 * 1024) lines = fastq.splitlines() # Assert first", "the # DRS implementations prohibits it, like Azul's DRS proxy", "if direct: with self._failing_s3_get_object(): dss_client = azul.dss.direct_access_client() self._test_dss_client(direct, query, dss_client,", "time from typing import ( AbstractSet, Any, Dict, IO, List,", ") from azul.modules import ( load_app_module, ) from azul.portal_service import", "if len(filtered_bundle_fqids) < max_bundles: if self.azul_client.bundle_has_project_json(catalog, bundle_fqid): filtered_bundle_fqids.append(bundle_fqid) else: break", "(200, 301, 302) self._assertResponseStatus(response, expected_statuses) return response def _assertResponseStatus(self, response:", "test_local_filtered_index_endpoints(self): filters = {'genusSpecies': {'is': ['Homo sapiens']}} url = self.url.copy().set(path='index/files',", "modifying the portals database. 
\"\"\" # Currently takes about 50", "1, attempts, time.time() - start) validator(catalog, response) @lru_cache(maxsize=None) def _get_one_file_uuid(self,", "drs_client = plugin.drs_client() access = drs_client.get_object(drs_uri, access_method=AccessMethod.https) self.assertIsNone(access.headers) self.assertEqual('https', furl(access.url).scheme)", "num_bundles, len(hits), entity_type, retries) if len(indexed_fqids) == num_bundles: log.info('Found the", "\"\"\" Aside from checking that all indices exist this method", "bundle') ] + [ ('debug', 'Loading file %s'), ('debug', 'Loading", "Pick bundles off of the randomly ordered input until we", "bundle_fqids=catalog.bundle_fqids) for catalog in catalogs: self._test_manifest(catalog.name) self._test_dos_and_drs(catalog.name) self._test_repository_files(catalog.name) if index", "+ 1, attempts, time.time() - start) validator(catalog, response) @lru_cache(maxsize=None) def", "mini_dss.get_bundle(uuid, version, 'aws') with self.assertRaises(self.SpecialError): mini_dss.get_file(uuid, version, 'aws') with self.assertRaises(self.SpecialError):", "self.assertEqual(200, response.status_code) catalog = first(config.integration_test_catalogs.keys()) def test_local_chalice_index_endpoints(self): url = self.url.copy().set(path='index/files',", "bundles in under %i seconds.', len(indexed_fqids), num_bundles, timeout) break else:", "as # the one we're making the DRS request with.", "credentials = service_account.Credentials.from_service_account_file(path) storage_client = storage.Client(credentials=credentials) content = BytesIO() storage_client.download_blob_to_file(url,", "bytes)', drs_uri, name, catalog, size) plugin = self.azul_client.repository_plugin(catalog) drs_client =", "(None, self._check_manifest, 1), ('compact', self._check_manifest, 1), ('full', self._check_manifest, 3), ('terra.bdbag',", "{'fileFormat': {'is': ['fastq.gz', 'fastq']}} response = self._check_endpoint(endpoint=config.service_endpoint(), path='/index/files', query=dict(catalog=catalog, filters=json.dumps(filters),", "instantiate a local ES client pointing at a real, remote", "captured_log: _, manifest, metadata = download_bundle_metadata(client=dss_client, replica=replica, uuid=bundle_uuid, version=bundle_version, num_workers=config.num_dss_workers)", "'not_found') def test_mini_dss_failures(self): uuid = 'acafefed-beef-4bad-babe-feedfa11afe1' version = '2018-11-19T232756.056947Z' with", "for catalog in catalogs: log.info('Starting integration test for catalog %r", "= ( '/', '/openapi', '/version', '/index/summary', '/index/files/order', ) service_routes =", "stable deployment as # they may contain work for non-IT", "asserts that we can instantiate a local ES client pointing", "= azul.dss.direct_access_client() if direct else azul.dss.client() with self.assertRaises(SwaggerAPIException) as e:", "import requests from azul import ( CatalogName, cached_property, config, drs,", "} ], \"must\": [ { \"exists\": { \"field\": \"files.project_json\" }", "from unittest import ( mock, ) import uuid from zipfile", "403: break self.assertEqual(200, response.status, response.data) self.assertEqual(size, int(response.headers['Content-Length'])) def __check_manifest(self, file:", "# Pick bundles off of the randomly ordered input until", "self.notifications.keys() def notifications_with_duplicates(self) -> List[JSON]: num_duplicates = self.num_bundles // 2", "from azul import ( CatalogName, cached_property, config, drs, ) from", "for path in health_paths ) for endpoint, path in (*service_routes,", 
"# test once. Then also set `index` to False. Subsequent", "self.assertEqual(size, int(response.headers['Content-Length'])) def __check_manifest(self, file: IO[bytes], uuid_field_name: str) -> List[Mapping[str,", "'aws', 'gcp': if direct: with self._failing_s3_get_object(): dss_client = azul.dss.direct_access_client() self._test_dss_client(direct,", "if fallback: expected = [ ('debug', 'Loading bundle %s'), ('debug',", "_get_entities(self, catalog: CatalogName, entity_type): entities = [] size = 100", "from catalog %r (%i bytes)', drs_uri, name, catalog, size) plugin", "# message logged. Note that the PyCharm debugger will call", "n_threads * 10 n_ops = 5 portal_service = PortalService() entry_format", "= '' @cached_property def azul_client(self): return AzulClient(prefix=self.bundle_uuid_prefix) class IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase):", "requests.Response, expected_statuses: Tuple[int, ...] = (200,)): self.assertIn(response.status_code, expected_statuses, (response.reason, response.content))", "content: bytes): # Check signature of FASTQ file. with gzip.open(BytesIO(content))", "len(metadata) else: expected = [] self.assertSequenceEqual(sorted(expected), sorted(actual)) def test_get_file_fail(self): for", "= logging.getLogger(__name__) # noinspection PyPep8Naming def setUpModule(): configure_test_logging(log) class IntegrationTestCase(AzulTestCase,", "_test_dss_client(self, direct: bool, query: JSON, dss_client: DSSClient, replica: str, fallback:", "self.assertEqual(e.exception.reason, 'not_found') def test_mini_dss_failures(self): uuid = 'acafefed-beef-4bad-babe-feedfa11afe1' version = '2018-11-19T232756.056947Z'", "calls: %r', captured_log.mock_calls) self.assertGreater(len(metadata), 0) self.assertGreater(set(f['name'] for f in manifest),", "'bundles'] def _assert_catalog_empty(self, catalog: CatalogName): for entity_type in self.entity_types: with", "in catalogs: self._assert_catalog_empty(catalog.name) self._test_other_endpoints() def _reset_indexer(self): # While it's OK", "_catalog: CatalogName, response: bytes): self.__check_manifest(BytesIO(response), 'bundle_uuid') def _check_terra_bdbag(self, catalog: CatalogName,", "config.service_endpoint() response = requests.get(service + '/') self.assertEqual(response.status_code, 200) self.assertEqual(response.headers['content-type'], 'text/html')", "self.assertEqual(response.status_code, 200) self.assertEqual(response.headers['content-type'], 'text/html') self.assertGreater(len(response.content), 0) # validate OpenAPI spec", "self.subTest(direct=direct): dss_client = azul.dss.direct_access_client() if direct else azul.dss.client() with self.assertRaises(SwaggerAPIException)", "a local ES client pointing at a real, remote ES", "for portal in new_db if 'mock-count' in portal] self.assertEqual(len(mock_counts), len(set(mock_counts)))", "fallback=False) else: dss_client = azul.dss.client() self._test_dss_client(direct, query, dss_client, replica, fallback=False)", "content = BytesIO() storage_client.download_blob_to_file(url, content) return content.getvalue() def _validate_fastq_content(self, content:", "BytesIO() storage_client.download_blob_to_file(url, content) return content.getvalue() def _validate_fastq_content(self, content: bytes): #", "len(expected_fqids) timeout = 600 indexed_fqids = set() log.debug('Expecting bundles %s", "check fails right away, preventing any attempts of direct access", "prefix in prefixes if row[prefix + suffix] ) log.info('Resolving %r", "in range(n_tasks)] 
self.assertTrue(all(f.result() is None for f in futures)) new_db", "= kwargs['spec'] def mock_boto3_client(service, *args, **kwargs): if service == 's3':", "%s'), ('debug', 'Loading object %s') ] + [ ('debug', 'Loading", "zip_fh.open(file_path) as file: rows = self.__check_manifest(file, 'bundle_uuid') for row in", "self.subTest('other_endpoints', endpoint=endpoint, path=path): self._check_endpoint(endpoint, path) def _test_manifest(self, catalog: CatalogName): for", "token is the same as # the one we're making", "object %s'), ('warning', 'Error accessing bundle'), ('warning', 'Failed getting bundle')", "self._test_dos(catalog, file_uuid) self._test_drs(catalog, file_uuid) @cached_property def _requests(self) -> requests.Session: return", "= rows prefixes = [ c[:-len(suffix)] for c in header.keys()", "AlwaysTearDownTestCase, AzulTestCase, ) log = logging.getLogger(__name__) # noinspection PyPep8Naming def", "self._test_manifest(catalog.name) self._test_dos_and_drs(catalog.name) self._test_repository_files(catalog.name) if index and delete: for catalog in", "in AccessMethod: with self.subTest('drs', catalog=catalog, access_method=AccessMethod.https): log.info('Resolving file %r with", "response.status, response.data) self.assertEqual(size, int(response.headers['Content-Length'])) def __check_manifest(self, file: IO[bytes], uuid_field_name: str)", "catalog: CatalogName): for entity_type in self.entity_types: with self.subTest('catalog_empty', catalog=catalog, entity_type=entity_type):", "\"\"\" # Currently takes about 50 seconds and creates a", "super().setUpClass() app_module = load_app_module('service') app_dir = os.path.dirname(app_module.__file__) factory = chalice.cli.factory.CLIFactory(app_dir)", "(301, 302): file_url = response.headers['Location'] try: retry_after = response.headers['Retry-After'] except", "str): with self.subTest('dos', catalog=catalog): log.info('Resolving file %s with DOS', file_uuid)", "self._assert_catalog_empty(catalog.name) self._test_other_endpoints() def _reset_indexer(self): # While it's OK to erase", "num_bundles) break elif time.time() > deadline: log.error('Only found %i of", "seed %i.', max_bundles, len(bundle_fqids), seed) random_ = random.Random(x=seed) # The", "IndexService, ) from azul.logging import ( configure_test_logging, ) from azul.modules", "\"bar\", \"entity_type\": \"project\", \"integration_type\": \"get\", \"entity_ids\": [\"baz\"] } ], \"mock-count\":", "} } } self.maxDiff = None for direct in {config.dss_direct_access,", "= self._get_url(response['Location']).json() content = self._get_url_content(response['Location']) self._validate_fastq_content(content) def _test_drs(self, catalog: CatalogName,", ") import json import logging import os import random import", "AccessMethod, ) import azul.dss from azul.es import ( ESClientFactory, )", "precondition check fails right away, preventing any attempts of direct", "logging import os import random import re import sys import", "file_uuid: str): with self.subTest('dos', catalog=catalog): log.info('Resolving file %s with DOS',", "of bundles with project metadata. 
filtered_bundle_fqids = [] for bundle_fqid", "keys for lambda '/', # all keys '/basic', '/elasticsearch', '/queues',", "test_openapi(self): service = config.service_endpoint() response = requests.get(service + '/') self.assertEqual(response.status_code,", "response = requests.get(service + '/openapi') response.raise_for_status() spec = response.json() validate_spec(spec)", "url = furl(scheme='http', host='127.0.0.1', port=8000) server = None server_thread =", "import ( BytesIO, TextIOWrapper, ) import json import logging import", "ordered input until we have the # desired number of", "body = response.json() hits = body['hits'] entities.extend(hits) url = body['pagination']['next']", "in futures)) new_db = portal_service.read() old_entries = [portal for portal", "request if response.status_code in (301, 302): file_url = response.headers['Location'] try:", "sapiens']}} url = self.url.copy().set(path='index/files', query=dict(filters=json.dumps(filters), catalog=self.catalog)).url response = requests.get(url) self.assertEqual(200,", "response.status_code in (301, 302): file_url = response.headers['Location'] try: retry_after =", "self._get_url_content(url.url) def _get_url_content(self, url: str) -> bytes: return self._get_url(url).content def", "else: break self._assertResponseStatus(response) self._validate_fastq_content(response.content) def _get_gs_url_content(self, url: str) -> bytes:", "of the randomly ordered input until we have the #", "*rows = rows prefixes = [ c[:-len(suffix)] for c in", ") from azul.requests import ( requests_session_with_retry_after, ) from azul.types import", "part of the DRS response but we know that this", "endpoint: str, path: str, query: Optional[Mapping[str, Any]] = None) ->", "class Catalog: name: CatalogName notifications: Mapping[BundleFQID, JSON] @property def num_bundles(self):", "= time.time() response = self._check_endpoint(config.service_endpoint(), '/manifest/files', params) log.info('Request %i/%i took", "self.assertIsNone(access.headers) if access.method is AccessMethod.https: content = self._get_url_content(access.url) elif access.method", "= self.azul_client.repository_plugin(catalog) drs = repository_plugin.drs_client() for access_method in AccessMethod: with", "azul.es import ( ESClientFactory, ) from azul.indexer import ( BundleFQID,", "hit.get('bundles', []) ) log.info('Detected %i of %i bundles in %i", "io import ( BytesIO, TextIOWrapper, ) import json import logging", "\"gte\": \"2019-04-01\" } } } ] } } } self.maxDiff", "= ( config.service_endpoint(), config.indexer_endpoint() ) health_paths = ( '', #", "replica='aws') self.assertEqual(e.exception.reason, 'not_found') def test_mini_dss_failures(self): uuid = 'acafefed-beef-4bad-babe-feedfa11afe1' version =", "[portal for portal in new_db if 'mock-count' not in portal]", "for access_method in AccessMethod: with self.subTest('drs', catalog=catalog, access_method=AccessMethod.https): log.info('Resolving file", "* 1024) lines = fastq.splitlines() # Assert first character of", "handle redirects ourselves so we can log each request if", "config.dss_direct_access: file_uuid = self._get_one_file_uuid(catalog) self._test_dos(catalog, file_uuid) self._test_drs(catalog, file_uuid) @cached_property def", "response['Status']) response = self._get_url(response['Location']).json() content = self._get_url_content(response['Location']) self._validate_fastq_content(content) def _test_drs(self,", "blob ] * len(metadata) else: # On `gcp` the precondition", "setUpClass(cls) -> None: super().setUpClass() 
app_module = load_app_module('service') app_dir = os.path.dirname(app_module.__file__)", "sort='fileSize')) hits = json.loads(response) return one(one(hits['hits'])['files'])['uuid'] def _test_dos_and_drs(self, catalog: CatalogName):", "entity_types = ['files', 'projects', 'samples', 'bundles'] def _assert_catalog_empty(self, catalog: CatalogName):", "bundle_uuid = rows[0][uuid_field_name] self.assertEqual(bundle_uuid, str(uuid.UUID(bundle_uuid))) return rows def _test_repository_files(self, catalog:", "HTTP client should do but for TDR we need to", "if index: self._reset_indexer() catalogs: List[Catalog] = [ Catalog(name=catalog, notifications=self._prepare_notifications(catalog) if", "log.error('Only found %i of %i bundles in under %i seconds.',", "portal_service._crud(lambda db: list(db) + [mock_entry]) old_db = portal_service.read() with ThreadPoolExecutor(max_workers=n_threads)", "preventing any attempts of direct access expected = [ ('warning',", "self._get_url(file_url, allow_redirects=False) # We handle redirects ourselves so we can", "+ timeout while True: hits = self._get_entities(catalog, entity_type) indexed_fqids.update( BundleFQID(bundle['bundleUuid'],", "fallback=False) class SpecialError(Exception): pass def _failing_s3_get_object(self): def make_mock(**kwargs): original =", ") from hca.util import ( SwaggerAPIException, ) from humancellatlas.data.metadata.helpers.dss import", "back to GET if the # DRS implementations prohibits it,", "True: response = self._get_url(file_url, allow_redirects=False) # We handle redirects ourselves", "uuid=bundle_uuid, version=bundle_version, num_workers=config.num_dss_workers) log.info('Captured log calls: %r', captured_log.mock_calls) self.assertGreater(len(metadata), 0)", "hit in hits]) def _get_entities(self, catalog: CatalogName, entity_type): entities =", "\"range\": { \"manifest.version\": { \"gte\": \"2019-04-01\" } } } ]", "%i.', len(indexed_fqids), num_bundles) break elif time.time() > deadline: log.error('Only found", "azul.dss.MiniDSS(config.dss_endpoint) with self.assertRaises(self.SpecialError): mini_dss._get_file_object(uuid, version) with self.assertRaises(KeyError): mini_dss._get_blob_key({}) with self.assertRaises(self.SpecialError):", "( AbstractSet, Any, Dict, IO, List, Mapping, Optional, Sequence, Tuple,", "re import sys import threading import time from typing import", "f'drs://{config.api_lambda_domain(\"service\")}/{file_uuid}' access = drs.get_object(drs_uri, access_method=access_method) self.assertIsNone(access.headers) if access.method is AccessMethod.https:", "from azul.drs import ( AccessMethod, ) import azul.dss from azul.es", "allow colons in this column, but they may # exist", "fails right away, preventing any attempts of direct access expected", "\"exists\": { \"field\": \"files.project_json\" } }, { \"range\": { \"manifest.version\":", "while True: response = self._get_url(file_url, allow_redirects=False) # We handle redirects", "first character of first and third line of file (see", "pollute portal DB') def test_concurrent_portal_db_crud(self): \"\"\" Use multithreading to simulate", "range(n_ops)}) # Reset to pre-test state. 
portal_service.overwrite(old_db) class OpenAPIIntegrationTest(AzulTestCase): def", "in health_endpoints for path in health_paths ) for endpoint, path", "%i of %i bundles in under %i seconds.', len(indexed_fqids), num_bundles,", "('warning', 'Error accessing bundle'), ('warning', 'Failed getting bundle') ] +", "body['hits'] entities.extend(hits) url = body['pagination']['next'] if url is None: break", "requests.get(service + '/openapi') response.raise_for_status() spec = response.json() validate_spec(spec) class DSSIntegrationTest(AzulTestCase):", "\"query\": { \"bool\": { \"must_not\": [ { \"term\": { \"admin_deleted\":", "reader = csv.DictReader(text, delimiter='\\t') rows = list(reader) log.info(f'Manifest contains {len(rows)}", "self.assertTrue(lines[2].startswith(b'+')) def _prepare_notifications(self, catalog: CatalogName) -> Dict[BundleFQID, JSON]: bundle_fqids =", "in header.keys() if c.endswith(suffix) ] size, drs_uri, name = min(", "('debug', 'Loading object %s'), # file ('debug', 'Loading object %s')", "for attempt in range(attempts): start = time.time() response = self._check_endpoint(config.service_endpoint(),", "= PortalService() entry_format = 'task={};op={}' def run(thread_count): for op_count in", "dss_client = azul.dss.client() self._test_dss_client(direct, query, dss_client, replica, fallback=False) class SpecialError(Exception):", "for non-IT catalogs. purge_queues=not config.is_stable_deployment(), delete_indices=True, create_indices=True) def _test_other_endpoints(self): service_paths", "allow_redirects else (200, 301, 302) self._assertResponseStatus(response, expected_statuses) return response def", "if 'mock-count' not in portal] self.assertEqual(old_entries, old_db) mock_counts = [portal['mock-count']", "azul.dss.direct_access_client() self._test_dss_client(direct, query, dss_client, replica, fallback=True) dss_client = azul.dss.direct_access_client() self._test_dss_client(direct,", "if replica == 'aws': if fallback: expected = [ ('debug',", "def _prune_test_bundles(self, catalog: CatalogName, bundle_fqids: Sequence[BundleFQID], max_bundles: int ) ->", "= [ c[:-len(suffix)] for c in header.keys() if c.endswith(suffix) ]", "{len(rows)} rows.') self.assertGreater(len(rows), 0) self.assertIn(uuid_field_name, reader.fieldnames) bundle_uuid = rows[0][uuid_field_name] self.assertEqual(bundle_uuid,", "import ( SwaggerAPIException, ) from humancellatlas.data.metadata.helpers.dss import ( download_bundle_metadata, )", "mock_counts = [portal['mock-count'] for portal in new_db if 'mock-count' in", "CatalogName, entity_type): entities = [] size = 100 params =", "'__file_name'], ) for row in rows for prefix in prefixes", "of direct access expected = [ ('warning', 'Failed getting bundle')", "that this token is the same as # the one", "work for non-IT catalogs. purge_queues=not config.is_stable_deployment(), delete_indices=True, create_indices=True) def _test_other_endpoints(self):", "time.time() - start) validator(catalog, response) @lru_cache(maxsize=None) def _get_one_file_uuid(self, catalog: CatalogName)", "= first(json_data['urls'])['url'] while True: response = self._get_url(file_url, allow_redirects=False) # We", "len(indexed_fqids), num_bundles, timeout) break else: retries += 1 time.sleep(5) self.assertSetEqual(indexed_fqids,", "futures = [executor.submit(run, i) for i in range(n_tasks)] self.assertTrue(all(f.result() is", "two lines. 
index = True delete = True if index:", "rows = self.__check_manifest(file, 'bundle_uuid') for row in rows: # Terra", "replica=replica, uuid=bundle_uuid, version=bundle_version, num_workers=config.num_dss_workers) log.info('Captured log calls: %r', captured_log.mock_calls) self.assertGreater(len(metadata),", "class AzulChaliceLocalIntegrationTest(AzulTestCase): url = furl(scheme='http', host='127.0.0.1', port=8000) server = None", "{'is': ['fastq.gz', 'fastq']}} response = self._check_endpoint(endpoint=config.service_endpoint(), path='/index/files', query=dict(catalog=catalog, filters=json.dumps(filters), size=1,", "test_local_chalice_health_endpoint(self): url = self.url.copy().set(path='health').url response = requests.get(url) self.assertEqual(200, response.status_code) catalog", "for _ in range(cls.prefix_length) ]) def setUp(self) -> None: super().setUp()", "run. Don't commit changes to these two lines. index =", "return response def _assertResponseStatus(self, response: requests.Response, expected_statuses: Tuple[int, ...] =", "_assert_catalog_complete(self, catalog: CatalogName, entity_type: str, bundle_fqids: AbstractSet[BundleFQID]) -> None: with", "storage_client = storage.Client(credentials=credentials) content = BytesIO() storage_client.download_blob_to_file(url, content) return content.getvalue()", "sys.maxsize) @contextmanager def subTest(self, msg: Any = None, **params: Any):", "in %i hits for entity type %s on try #%i.',", "Check signature of FASTQ file. with gzip.open(BytesIO(content)) as buf: fastq", "[ ('warning', 'Failed getting bundle') ] + [ ('warning', 'Failed", "path=f'/fetch/repository/files/{file_uuid}', query=dict(catalog=catalog)) response = json.loads(response) while response['Status'] != 302: self.assertEqual(301,", "assertions. actual = [(m, ' '.join(re.split(r'[\\s,]', a[0])[:3])) for m, a,", "like Azul's DRS proxy of DSS. 
for method in ('HEAD',", "than the expected %i.', len(indexed_fqids), num_bundles) break elif time.time() >", "= [ ('warning', 'Failed getting bundle') ] + [ ('warning',", "per_page=10) bundle_uuid, _, bundle_version = response['results'][0]['bundle_fqid'].partition('.') with mock.patch('azul.dss.logger') as captured_log:", "= IndexService() for index_name in service.index_names(catalog): self.assertTrue(es_client.indices.exists(index_name)) class AzulClientIntegrationTest(IntegrationTestCase): def", "in the `headers` # part of the DRS response but", "abc import ( ABCMeta, ) from concurrent.futures.thread import ( ThreadPoolExecutor,", "{ \"query\": { \"bool\": { \"must_not\": [ { \"term\": {", "index else {}) for catalog in config.integration_test_catalogs ] if index:", "self.entity_types: with self.subTest('catalog_empty', catalog=catalog, entity_type=entity_type): hits = self._get_entities(catalog, entity_type) self.assertEqual([],", "plugin.drs_client() access = drs_client.get_object(drs_uri, access_method=AccessMethod.https) self.assertIsNone(access.headers) self.assertEqual('https', furl(access.url).scheme) # Try", "uuid = 'acafefed-beef-4bad-babe-feedfa11afe1' version = '2018-11-19T232756.056947Z' with self._failing_s3_get_object(): mini_dss =", "hits = body['hits'] entities.extend(hits) url = body['pagination']['next'] if url is", "for f in futures)) new_db = portal_service.read() old_entries = [portal", "random.choices() may pick the same element multiple times so #", "expected_statuses) return response def _assertResponseStatus(self, response: requests.Response, expected_statuses: Tuple[int, ...]", "notifications.extend(random.choices(notifications, k=num_duplicates)) return notifications def _wait_for_indexer(): num_bundles = sum(catalog.num_bundles for", "factory.create_config_obj() cls.server = factory.create_local_server(app_obj=app_module.app, config=config, host=cls.url.host, port=cls.url.port) cls.server_thread = threading.Thread(target=cls.server.serve_forever)", "time.sleep(int(retry_after)) else: break self._assertResponseStatus(response) self._validate_fastq_content(response.content) def _get_gs_url_content(self, url: str) ->", "notifications=catalog.notifications_with_duplicates(), delete=True) _wait_for_indexer() for catalog in catalogs: self._assert_catalog_empty(catalog.name) self._test_other_endpoints() def", "spec response = requests.get(service + '/openapi') response.raise_for_status() spec = response.json()", "return mock_boto3_client return mock.patch('azul.deployment.aws.client', spec=True, new_callable=make_mock) def _test_dss_client(self, direct: bool,", "took %.3fs to execute.', attempt + 1, attempts, time.time() -", "%r with DRS using %r', file_uuid, access_method) drs_uri = f'drs://{config.api_lambda_domain(\"service\")}/{file_uuid}'", "catalog in catalogs: self._test_manifest(catalog.name) self._test_dos_and_drs(catalog.name) self._test_repository_files(catalog.name) if index and delete:", "\"entity_type\": \"project\", \"integration_type\": \"get\", \"entity_ids\": [\"baz\"] } ], \"mock-count\": entry_format.format(thread_count,", "replica == 'aws': if fallback: expected = [ ('debug', 'Loading", "would pollute portal DB') def test_concurrent_portal_db_crud(self): \"\"\" Use multithreading to", "'.join(re.split(r'[\\s,]', a[0])[:3])) for m, a, k in captured_log.mock_calls] if direct:", "under %i seconds.', len(indexed_fqids), num_bundles, timeout) break else: retries +=", "from zipfile import ( ZipFile, ) import attr import chalice.cli", "*args, 
**kwargs): if service == 's3': mock_s3 = mock.MagicMock() mock_s3.get_object.side_effect", "direct: bool, query: JSON, dss_client: DSSClient, replica: str, fallback: bool):", "lines. index = True delete = True if index: self._reset_indexer()", "import ( CatalogName, cached_property, config, drs, ) from azul.azulclient import", "from abc import ( ABCMeta, ) from concurrent.futures.thread import (", "None: super().setUpClass() cls.bundle_uuid_prefix = ''.join([ str(random.choice('abcdef0123456789')) for _ in range(cls.prefix_length)", "def _assert_indices_exist(self, catalog: CatalogName): \"\"\" Aside from checking that all", "file_path = os.path.join(data_path, 'participants.tsv') with zip_fh.open(file_path) as file: rows =", "( furl, ) from google.cloud import ( storage, ) from", "SwaggerAPIException, ) from humancellatlas.data.metadata.helpers.dss import ( download_bundle_metadata, ) from more_itertools", "the DRS request with. response = drs_client.http_client.request(method, access.url) if response.status", "order='asc', sort='fileSize')) hits = json.loads(response) return one(one(hits['hits'])['files'])['uuid'] def _test_dos_and_drs(self, catalog:", "with super().subTest(msg, **params): try: yield except BaseException: log.info('Failed sub-test [%s]", "break self.assertEqual(200, response.status, response.data) self.assertEqual(size, int(response.headers['Content-Length'])) def __check_manifest(self, file: IO[bytes],", "_prune_test_bundles(self, catalog: CatalogName, bundle_fqids: Sequence[BundleFQID], max_bundles: int ) -> List[BundleFQID]:", "self.subTest('repository_files', catalog=catalog): file_uuid = self._get_one_file_uuid(catalog) response = self._check_endpoint(endpoint=config.service_endpoint(), path=f'/fetch/repository/files/{file_uuid}', query=dict(catalog=catalog))", "in hit.get('bundles', []) ) log.info('Detected %i of %i bundles in", "+ '__file_size']), row[prefix + suffix], row[prefix + '__file_name'], ) for", "max_bundles: if self.azul_client.bundle_has_project_json(catalog, bundle_fqid): filtered_bundle_fqids.append(bundle_fqid) else: break return filtered_bundle_fqids def", "'projects', 'samples', 'bundles'] def _assert_catalog_empty(self, catalog: CatalogName): for entity_type in", "None: break return entities def _assert_indices_exist(self, catalog: CatalogName): \"\"\" Aside", "query, dss_client, replica, fallback=False) class SpecialError(Exception): pass def _failing_s3_get_object(self): def", "catalogs: self._assert_catalog_complete(catalog=catalog.name, entity_type='files', bundle_fqids=catalog.bundle_fqids) for catalog in catalogs: self._test_manifest(catalog.name) self._test_dos_and_drs(catalog.name)", "'/index/files/order', ) service_routes = ( (config.service_endpoint(), path) for path in", "cls.bundle_uuid_prefix = ''.join([ str(random.choice('abcdef0123456789')) for _ in range(cls.prefix_length) ]) def", "have the # desired number of bundles with project metadata.", "execute.', attempt + 1, attempts, time.time() - start) validator(catalog, response)", "pass else: time.sleep(int(retry_after)) else: break self._assertResponseStatus(response) self._validate_fastq_content(response.content) def _get_gs_url_content(self, url:", "= BytesIO() storage_client.download_blob_to_file(url, content) return content.getvalue() def _validate_fastq_content(self, content: bytes):", "def _check_manifest(self, _catalog: CatalogName, response: bytes): self.__check_manifest(BytesIO(response), 'bundle_uuid') def _check_terra_bdbag(self,", "v in query.items()} url = 
furl(endpoint, path=path, query=query) return self._get_url_content(url.url)", "'/') self.assertEqual(response.status_code, 200) self.assertEqual(response.headers['content-type'], 'text/html') self.assertGreater(len(response.content), 0) # validate OpenAPI", "* 60 @classmethod def setUpClass(cls) -> None: super().setUpClass() cls.bundle_uuid_prefix =", "(%r) from catalog %r (%i bytes)', drs_uri, name, catalog, size)", "deployment like production. self.azul_client.reset_indexer(catalogs=config.integration_test_catalogs, # Can't purge the queues in", ") import requests from azul import ( CatalogName, cached_property, config,", "that we handle duplicate additions. # Note: random.choices() may pick", "one we're making the DRS request with. response = drs_client.http_client.request(method,", "!= 403: break self.assertEqual(200, response.status, response.data) self.assertEqual(size, int(response.headers['Content-Length'])) def __check_manifest(self,", "keys '/basic', '/elasticsearch', '/queues', '/progress', '/api_endpoints', '/other_lambdas' ) health_routes =", "should give same random order so we need to have", "response.content)) def _check_manifest(self, _catalog: CatalogName, response: bytes): self.__check_manifest(BytesIO(response), 'bundle_uuid') def", "sub-test [%s] %r', msg, params) raise else: log.info('Successful sub-test [%s]", "import ( load_app_module, ) from azul.portal_service import ( PortalService, )", "_test_dos_and_drs(self, catalog: CatalogName): if config.is_dss_enabled(catalog) and config.dss_direct_access: file_uuid = self._get_one_file_uuid(catalog)", "run(thread_count): for op_count in range(n_ops): mock_entry = cast(JSON, { \"portal_id\":", "for entity_type in self.entity_types: with self.subTest('catalog_empty', catalog=catalog, entity_type=entity_type): hits =", "from checking that all indices exist this method also asserts", "content = self._get_url_content(response['Location']) self._validate_fastq_content(content) def _test_drs(self, catalog: CatalogName, file_uuid: str):", "{ \"gte\": \"2019-04-01\" } } } ] } } }", "Optional[Mapping[str, Any]] = None) -> bytes: query = {} if", "int(response.headers['Content-Length'])) def __check_manifest(self, file: IO[bytes], uuid_field_name: str) -> List[Mapping[str, str]]:", "with mock.patch('azul.dss.logger') as captured_log: _, manifest, metadata = download_bundle_metadata(client=dss_client, replica=replica,", "file') ] * len(metadata) else: expected = [] self.assertSequenceEqual(sorted(expected), sorted(actual))", "= self._check_endpoint(config.service_endpoint(), '/manifest/files', params) log.info('Request %i/%i took %.3fs to execute.',", "dict(catalog=catalog, size=str(size)) url = furl(url=config.service_endpoint(), path=('index', entity_type), query_params=params ).url while", "ESClientFactory, ) from azul.indexer import ( BundleFQID, ) from azul.indexer.index_service", "# deployment like production. 
self.azul_client.reset_indexer(catalogs=config.integration_test_catalogs, # Can't purge the queues", "in new_db if 'mock-count' in portal] self.assertEqual(len(mock_counts), len(set(mock_counts))) self.assertEqual(set(mock_counts), {entry_format.format(i,", "self.azul_client.bundle_has_project_json(catalog, bundle_fqid): filtered_bundle_fqids.append(bundle_fqid) else: break return filtered_bundle_fqids def _assert_catalog_complete(self, catalog:", "( DSSClient, ) from hca.util import ( SwaggerAPIException, ) from", "in hits for bundle in hit.get('bundles', []) ) log.info('Detected %i", "super().setUpClass() cls.bundle_uuid_prefix = ''.join([ str(random.choice('abcdef0123456789')) for _ in range(cls.prefix_length) ])", "Index some bundles again to test that we handle duplicate", "config.is_dss_enabled(catalog) and config.dss_direct_access: file_uuid = self._get_one_file_uuid(catalog) self._test_dos(catalog, file_uuid) self._test_drs(catalog, file_uuid)", "else: break return filtered_bundle_fqids def _assert_catalog_complete(self, catalog: CatalogName, entity_type: str,", "[mock_entry]) old_db = portal_service.read() with ThreadPoolExecutor(max_workers=n_threads) as executor: futures =", "] + [ ('warning', 'Failed getting file') ] * len(metadata)", "] * len(metadata) else: expected = [] self.assertSequenceEqual(sorted(expected), sorted(actual)) def", "max_bundles = 64 min_timeout = 20 * 60 @classmethod def", "60 @classmethod def setUpClass(cls) -> None: super().setUpClass() cls.bundle_uuid_prefix = ''.join([", "{ \"field\": \"files.project_json\" } }, { \"range\": { \"manifest.version\": {", "header, *rows = rows prefixes = [ c[:-len(suffix)] for c", "self.__check_manifest(file, 'bundle_uuid') for row in rows: # Terra doesn't allow", "indexed_fqids.update( BundleFQID(bundle['bundleUuid'], bundle['bundleVersion']) for hit in hits for bundle in", "{}) for catalog in config.integration_test_catalogs ] if index: for catalog", "by TDR self.assertNotIn(':', row['entity:participant_id']) suffix = '__file_drs_uri' header, *rows =", "format=format_, attempts=attempts): assert attempts > 0 params = dict(catalog=catalog) if", "test_azul_client_error_handling(self): invalid_notification = {} notifications = [invalid_notification] self.assertRaises(AzulClientNotificationError, self.azul_client.index, first(config.integration_test_catalogs),", "efficient, fall back to GET if the # DRS implementations", "CatalogName, file_uuid: str): with self.subTest('dos', catalog=catalog): log.info('Resolving file %s with", "self.assertTrue(url.startswith('gs://')) path = os.environ['GOOGLE_APPLICATION_CREDENTIALS'] credentials = service_account.Credentials.from_service_account_file(path) storage_client = storage.Client(credentials=credentials)", "# assertions. actual = [(m, ' '.join(re.split(r'[\\s,]', a[0])[:3])) for m,", "cls.server_thread.start() @classmethod def tearDownClass(cls) -> None: cls.server.shutdown() cls.server_thread.join() super().tearDownClass() def", "= TextIOWrapper(file) reader = csv.DictReader(text, delimiter='\\t') rows = list(reader) log.info(f'Manifest", "_get_one_file_uuid(self, catalog: CatalogName) -> str: filters = {'fileFormat': {'is': ['fastq.gz',", "to use an # authenticated client. 
[Shingled n-gram fragments of a Python integration-test module: classes IntegrationTestCase, IndexingIntegrationTest, AzulClientIntegrationTest, PortalRegistrationIntegrationTest, OpenAPIIntegrationTest, DSSIntegrationTest, AzulChaliceLocalIntegrationTest; imports from azul, azul.dss, hca.dss, hca.util, humancellatlas.data.metadata.helpers.dss, google.cloud.storage, google.oauth2, openapi_spec_validator, more_itertools.]
notifications.extend(random.choices(notifications, k=num_duplicates)) return notifications def _wait_for_indexer():", "import ( JSON, ) from azul_test_case import ( AlwaysTearDownTestCase, AzulTestCase,", "retries += 1 time.sleep(5) self.assertSetEqual(indexed_fqids, expected_fqids) entity_types = ['files', 'projects',", "size=str(size)) url = furl(url=config.service_endpoint(), path=('index', entity_type), query_params=params ).url while True:", "str, bundle_fqids: AbstractSet[BundleFQID]) -> None: with self.subTest('catalog_complete', catalog=catalog): expected_fqids =", "self.assertEqual(len(mock_counts), len(set(mock_counts))) self.assertEqual(set(mock_counts), {entry_format.format(i, j) for i in range(n_tasks) for", "if service == 's3': mock_s3 = mock.MagicMock() mock_s3.get_object.side_effect = self.SpecialError()", "bool, query: JSON, dss_client: DSSClient, replica: str, fallback: bool): with", "self.pruning_seed = random.randint(0, sys.maxsize) @contextmanager def subTest(self, msg: Any =", "same seed should give same random order so we need", "each request if response.status_code in (301, 302): file_url = response.headers['Location']", "self._check_manifest, 3), ('terra.bdbag', self._check_terra_bdbag, 1) ]: with self.subTest('manifest', catalog=catalog, format=format_,", "server_thread = None @classmethod def setUpClass(cls) -> None: super().setUpClass() app_module", "getting file') ] * len(metadata) else: expected = [] self.assertSequenceEqual(sorted(expected),", "mini_dss._get_blob_key({}) with self.assertRaises(self.SpecialError): mini_dss._get_blob('/blobs/foo', {'content-type': 'application/json'}) with self.assertRaises(self.SpecialError): mini_dss.get_bundle(uuid, version,", "elif time.time() > deadline: log.error('Only found %i of %i bundles", "AzulChaliceLocalIntegrationTest(AzulTestCase): url = furl(scheme='http', host='127.0.0.1', port=8000) server = None server_thread", "json.loads(response) return one(one(hits['hits'])['files'])['uuid'] def _test_dos_and_drs(self, catalog: CatalogName): if config.is_dss_enabled(catalog) and", "self._validate_fastq_content(content) def _test_dos(self, catalog: CatalogName, file_uuid: str): with self.subTest('dos', catalog=catalog):", "self.url.copy().set(path='health').url response = requests.get(url) self.assertEqual(200, response.status_code) catalog = first(config.integration_test_catalogs.keys()) def", "range(n_tasks) for j in range(n_ops)}) # Reset to pre-test state.", "delete=True) _wait_for_indexer() for catalog in catalogs: self._assert_catalog_empty(catalog.name) self._test_other_endpoints() def _reset_indexer(self):", "= True if index: self._reset_indexer() catalogs: List[Catalog] = [ Catalog(name=catalog,", "max_bundles, len(bundle_fqids), seed) random_ = random.Random(x=seed) # The same seed", "if index and delete: for catalog in catalogs: self.azul_client.index(catalog=catalog.name, notifications=catalog.notifications_with_duplicates(),", "'participants.tsv') with zip_fh.open(file_path) as file: rows = self.__check_manifest(file, 'bundle_uuid') for", ") from openapi_spec_validator import ( validate_spec, ) import requests from", "The same seed should give same random order so we", "set `delete` to False and run # test once. 
Then", "prefix_length = 2 max_bundles = 64 min_timeout = 20 *", "for lambda '/', # all keys '/basic', '/elasticsearch', '/queues', '/progress',", "def bundle_fqids(self) -> AbstractSet[BundleFQID]: return self.notifications.keys() def notifications_with_duplicates(self) -> List[JSON]:", "# file ('debug', 'Loading object %s') # blob ] *", "= [] self.assertSequenceEqual(sorted(expected), sorted(actual)) def test_get_file_fail(self): for direct in {config.dss_direct_access,", "None else {k: str(v) for k, v in query.items()} url", "query: Optional[Mapping[str, Any]] = None) -> bytes: query = {}", "object %s') # blob ] * len(metadata) else: # On", "contain work for non-IT catalogs. purge_queues=not config.is_stable_deployment(), delete_indices=True, create_indices=True) def", "PyPep8Naming def setUpModule(): configure_test_logging(log) class IntegrationTestCase(AzulTestCase, metaclass=ABCMeta): bundle_uuid_prefix: str =", "deployment as # they may contain work for non-IT catalogs.", "def _test_repository_files(self, catalog: str): with self.subTest('repository_files', catalog=catalog): file_uuid = self._get_one_file_uuid(catalog)", "response['Status'] != 302: self.assertEqual(301, response['Status']) response = self._get_url(response['Location']).json() content =", "access.url) # For DSS, any HTTP client should do but", "# DRS implementations prohibits it, like Azul's DRS proxy of", "== 'aws': if fallback: expected = [ ('debug', 'Loading bundle", "type %s on try #%i.', len(indexed_fqids), num_bundles, len(hits), entity_type, retries)", "name, catalog, size) plugin = self.azul_client.repository_plugin(catalog) drs_client = plugin.drs_client() access", "path: str, query: Optional[Mapping[str, Any]] = None) -> bytes: query", "', sorted(expected_fqids)) retries = 0 deadline = time.time() + timeout", "os.path.join(os.path.dirname(first(zip_fh.namelist())), 'data') file_path = os.path.join(data_path, 'participants.tsv') with zip_fh.open(file_path) as file:", "c.endswith(suffix) ] size, drs_uri, name = min( ( int(row[prefix +", "creates a 25 kb db file. n_threads = 10 n_tasks", "self.azul_client.index(catalog=catalog.name, notifications=catalog.notifications_with_duplicates()) _wait_for_indexer() for catalog in catalogs: self._assert_catalog_complete(catalog=catalog.name, entity_type='files', bundle_fqids=catalog.bundle_fqids)", "def test_patched_dss_client(self): query = { \"query\": { \"bool\": { \"must_not\":", "retry_after = response.headers['Retry-After'] except KeyError: pass else: time.sleep(int(retry_after)) else: break", "= ( (endpoint, '/health' + path) for endpoint in health_endpoints", "third line of file (see https://en.wikipedia.org/wiki/FASTQ_format). self.assertTrue(lines[0].startswith(b'@')) self.assertTrue(lines[2].startswith(b'+')) def _prepare_notifications(self,", "( requests_session_with_retry_after, ) from azul.types import ( JSON, ) from", "Any = None, **params: Any): log.info('Beginning sub-test [%s] %r', msg,", "def _requests(self) -> requests.Session: return requests_session_with_retry_after() def _check_endpoint(self, endpoint: str,", "obsolete bundle versions %r', obsolete_fqids) num_bundles = len(expected_fqids) timeout =", "'GET'): log.info('%s %s', method, access.url) # For DSS, any HTTP", "Don't commit changes to these two lines. 
index = True", "'Loading object %s') # blob ] * len(metadata) else: #", "# part of the DRS response but we know that", "for catalog in catalogs: self._assert_catalog_empty(catalog.name) self._test_other_endpoints() def _reset_indexer(self): # While", "DSS. for method in ('HEAD', 'GET'): log.info('%s %s', method, access.url)", "log.info('Resolving file %s with DOS', file_uuid) response = self._check_endpoint(config.service_endpoint(), path=drs.dos_object_url_path(file_uuid),", "method name and the first three words of log #", "filtered_bundle_fqids.append(bundle_fqid) else: break return filtered_bundle_fqids def _assert_catalog_complete(self, catalog: CatalogName, entity_type:", "uuid_field_name: str) -> List[Mapping[str, str]]: text = TextIOWrapper(file) reader =", "TDR does return a Bearer token in the `headers` #", "futures)) new_db = portal_service.read() old_entries = [portal for portal in", "from contextlib import ( contextmanager, ) import csv from functools", "len(self.notifications) @property def bundle_fqids(self) -> AbstractSet[BundleFQID]: return self.notifications.keys() def notifications_with_duplicates(self)", "For faster modify-deploy-test cycles, set `delete` to False and run", "self._check_endpoint(config.service_endpoint(), '/manifest/files', params) log.info('Request %i/%i took %.3fs to execute.', attempt", "response = json.loads(response) while response['Status'] != 302: self.assertEqual(301, response['Status']) response", "mini_dss._get_file_object(uuid, version) with self.assertRaises(KeyError): mini_dss._get_blob_key({}) with self.assertRaises(self.SpecialError): mini_dss._get_blob('/blobs/foo', {'content-type': 'application/json'})", "} ], \"mock-count\": entry_format.format(thread_count, op_count) }) portal_service._crud(lambda db: list(db) +", "'Error accessing file'), ('warning', 'Failed getting file') ] * len(metadata)", "in this column, but they may # exist in versions", ") from azul.azulclient import ( AzulClient, AzulClientNotificationError, ) from azul.drs", "ZipFile(BytesIO(response)) as zip_fh: data_path = os.path.join(os.path.dirname(first(zip_fh.namelist())), 'data') file_path = os.path.join(data_path,", "= self.azul_client.repository_plugin(catalog) drs_client = plugin.drs_client() access = drs_client.get_object(drs_uri, access_method=AccessMethod.https) self.assertIsNone(access.headers)", "test that we handle duplicate additions. # Note: random.choices() may", "302): file_url = response.headers['Location'] try: retry_after = response.headers['Retry-After'] except KeyError:", "catalog=catalog): file_uuid = self._get_one_file_uuid(catalog) response = self._check_endpoint(endpoint=config.service_endpoint(), path=f'/fetch/repository/files/{file_uuid}', query=dict(catalog=catalog)) response", "with ZipFile(BytesIO(response)) as zip_fh: data_path = os.path.join(os.path.dirname(first(zip_fh.namelist())), 'data') file_path =", "return notifications def _wait_for_indexer(): num_bundles = sum(catalog.num_bundles for catalog in", "CatalogName) -> Dict[BundleFQID, JSON]: bundle_fqids = self.azul_client.list_bundles(catalog) bundle_fqids = self._prune_test_bundles(catalog,", "ourselves so we can log each request if response.status_code in", "DSSClient, replica: str, fallback: bool): with self.subTest(direct=direct, replica=replica, fallback=fallback): response", "accessing bundle'), ('warning', 'Failed getting bundle') ] + [ ('debug',", "changes to these two lines. 
index = True delete =", "{} notifications = [invalid_notification] self.assertRaises(AzulClientNotificationError, self.azul_client.index, first(config.integration_test_catalogs), notifications) class PortalRegistrationIntegrationTest(IntegrationTestCase):", "url = self.url.copy().set(path='index/files', query=dict(filters=json.dumps(filters), catalog=self.catalog)).url response = requests.get(url) self.assertEqual(200, response.status_code)", "path in health_paths ) for endpoint, path in (*service_routes, *health_routes):", "know that this token is the same as # the", "config.indexer_endpoint() ) health_paths = ( '', # default keys for", "in service.index_names(catalog): self.assertTrue(es_client.indices.exists(index_name)) class AzulClientIntegrationTest(IntegrationTestCase): def test_azul_client_error_handling(self): invalid_notification = {}", "'mock-count' not in portal] self.assertEqual(old_entries, old_db) mock_counts = [portal['mock-count'] for", "pick the same element multiple times so # some notifications", "methods on the variable, leading to failed # assertions. actual", "dss_client.get_file(uuid='acafefed-beef-4bad-babe-feedfa11afe1', version='2018-11-19T232756.056947Z', replica='aws') self.assertEqual(e.exception.reason, 'not_found') def test_mini_dss_failures(self): uuid = 'acafefed-beef-4bad-babe-feedfa11afe1'", "order in the input list. bundle_fqids = sorted(bundle_fqids) random_.shuffle(bundle_fqids) #", "format_ for attempt in range(attempts): start = time.time() response =", "catalogs: self._test_manifest(catalog.name) self._test_dos_and_drs(catalog.name) self._test_repository_files(catalog.name) if index and delete: for catalog", "'/basic', '/elasticsearch', '/queues', '/progress', '/api_endpoints', '/other_lambdas' ) health_routes = (", "drs, ) from azul.azulclient import ( AzulClient, AzulClientNotificationError, ) from", "= min( ( int(row[prefix + '__file_size']), row[prefix + suffix], row[prefix", "more efficient, fall back to GET if the # DRS", "as executor: futures = [executor.submit(run, i) for i in range(n_tasks)]", "\"\"\" es_client = ESClientFactory.get() service = IndexService() for index_name in", "in {config.dss_direct_access, False}: for replica in 'aws', 'gcp': if direct:", "runs will use # catalogs from first run. 
Don't commit", "url = furl(url=config.service_endpoint(), path=('index', entity_type), query_params=params ).url while True: response", "= self._get_url(file_url, allow_redirects=False) # We handle redirects ourselves so we", "# The same seed should give same random order so", "[(m, ' '.join(re.split(r'[\\s,]', a[0])[:3])) for m, a, k in captured_log.mock_calls]", "= set(self.azul_client.filter_obsolete_bundle_versions(bundle_fqids)) obsolete_fqids = bundle_fqids - expected_fqids if obsolete_fqids: log.debug('Ignoring", "'/other_lambdas' ) health_routes = ( (endpoint, '/health' + path) for", "catalog: CatalogName) -> str: filters = {'fileFormat': {'is': ['fastq.gz', 'fastq']}}", "we have the # desired number of bundles with project", "\"files.project_json\" } }, { \"range\": { \"manifest.version\": { \"gte\": \"2019-04-01\"", "bytes): self.__check_manifest(BytesIO(response), 'bundle_uuid') def _check_terra_bdbag(self, catalog: CatalogName, response: bytes): with", "TextIOWrapper, ) import json import logging import os import random", "True if index: self._reset_indexer() catalogs: List[Catalog] = [ Catalog(name=catalog, notifications=self._prepare_notifications(catalog)", "is AccessMethod.https: content = self._get_url_content(access.url) elif access.method is AccessMethod.gs: content", "in range(n_ops): mock_entry = cast(JSON, { \"portal_id\": \"foo\", \"integrations\": [", "= response.json() hits = body['hits'] entities.extend(hits) url = body['pagination']['next'] if", "else: log.info('Successful sub-test [%s] %r', msg, params) def test(self): @attr.s(auto_attribs=True,", "10 n_ops = 5 portal_service = PortalService() entry_format = 'task={};op={}'", "csv.DictReader(text, delimiter='\\t') rows = list(reader) log.info(f'Manifest contains {len(rows)} rows.') self.assertGreater(len(rows),", "= [portal for portal in new_db if 'mock-count' not in", "DRS using %r', file_uuid, access_method) drs_uri = f'drs://{config.api_lambda_domain(\"service\")}/{file_uuid}' access =", "[] self.assertSequenceEqual(sorted(expected), sorted(actual)) def test_get_file_fail(self): for direct in {config.dss_direct_access, False}:", "CatalogName): \"\"\" Aside from checking that all indices exist this", "= [ ('debug', 'Loading bundle %s'), ('debug', 'Loading object %s')", "catalogs) self.azul_client.wait_for_indexer(num_expected_bundles=num_bundles, min_timeout=self.min_timeout) # For faster modify-deploy-test cycles, set `delete`", "+ [ ('warning', 'Failed getting file') ] * len(metadata) else:", "= config.service_endpoint() response = requests.get(service + '/') self.assertEqual(response.status_code, 200) self.assertEqual(response.headers['content-type'],", "in range(cls.prefix_length) ]) def setUp(self) -> None: super().setUp() self.pruning_seed =", "repository_plugin.drs_client() for access_method in AccessMethod: with self.subTest('drs', catalog=catalog, access_method=AccessMethod.https): log.info('Resolving", "bundle %s'), ('debug', 'Loading object %s'), ('warning', 'Error accessing bundle'),", "if allow_redirects else (200, 301, 302) self._assertResponseStatus(response, expected_statuses) return response", "+= 1 time.sleep(5) self.assertSetEqual(indexed_fqids, expected_fqids) entity_types = ['files', 'projects', 'samples',", "bundle_fqids, self.max_bundles) return { bundle_fqid: self.azul_client.synthesize_notification(catalog, bundle_fqid) for bundle_fqid in", "of file (see https://en.wikipedia.org/wiki/FASTQ_format). 
self.assertTrue(lines[0].startswith(b'@')) self.assertTrue(lines[2].startswith(b'+')) def _prepare_notifications(self, catalog: CatalogName)", "for catalog in catalogs) self.azul_client.wait_for_indexer(num_expected_bundles=num_bundles, min_timeout=self.min_timeout) # For faster modify-deploy-test", "self.bundle_uuid_prefix) self.azul_client.index(catalog=catalog.name, notifications=catalog.notifications_with_duplicates()) _wait_for_indexer() for catalog in catalogs: self._assert_catalog_complete(catalog=catalog.name, entity_type='files',", "] * len(metadata) else: expected = [ ('debug', 'Loading bundle", "self.assertSetEqual(indexed_fqids, expected_fqids) entity_types = ['files', 'projects', 'samples', 'bundles'] def _assert_catalog_empty(self,", "%i bundles, more than the expected %i.', len(indexed_fqids), num_bundles) break", "openapi_spec_validator import ( validate_spec, ) import requests from azul import", "host='127.0.0.1', port=8000) server = None server_thread = None @classmethod def", "file_uuid = self._get_one_file_uuid(catalog) self._test_dos(catalog, file_uuid) self._test_drs(catalog, file_uuid) @cached_property def _requests(self)", "object %s'), ('warning', 'Error accessing file'), ('warning', 'Failed getting file')", "# While it's OK to erase the integration test catalog,", "List[BundleFQID]: seed = self.pruning_seed log.info('Selecting %i bundles with projects, out", "log.info('Successful sub-test [%s] %r', msg, params) def test(self): @attr.s(auto_attribs=True, kw_only=True)", "validate OpenAPI spec response = requests.get(service + '/openapi') response.raise_for_status() spec", "\"portal_id\": \"foo\", \"integrations\": [ { \"integration_id\": \"bar\", \"entity_type\": \"project\", \"integration_type\":", "len(bundle_fqids), seed) random_ = random.Random(x=seed) # The same seed should", "and config.dss_direct_access: file_uuid = self._get_one_file_uuid(catalog) self._test_dos(catalog, file_uuid) self._test_drs(catalog, file_uuid) @cached_property", "min_timeout=self.min_timeout) # For faster modify-deploy-test cycles, set `delete` to False", "= None) -> bytes: query = {} if query is", "test for catalog %r with %i bundles from prefix %r.',", "if self.azul_client.bundle_has_project_json(catalog, bundle_fqid): filtered_bundle_fqids.append(bundle_fqid) else: break return filtered_bundle_fqids def _assert_catalog_complete(self,", "'Loading file %s'), ('debug', 'Loading object %s'), ('warning', 'Error accessing", "AzulClient(prefix=self.bundle_uuid_prefix) class IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase): prefix_length = 2 max_bundles = 64", "state. 
portal_service.overwrite(old_db) class OpenAPIIntegrationTest(AzulTestCase): def test_openapi(self): service = config.service_endpoint() response", "\"must\": [ { \"exists\": { \"field\": \"files.project_json\" } }, {", "access.method is AccessMethod.gs: content = self._get_gs_url_content(access.url) else: self.fail(access_method) self._validate_fastq_content(content) def", "== 's3': mock_s3 = mock.MagicMock() mock_s3.get_object.side_effect = self.SpecialError() return mock_s3", "same as # the one we're making the DRS request", "= response['results'][0]['bundle_fqid'].partition('.') with mock.patch('azul.dss.logger') as captured_log: _, manifest, metadata =", "getting bundle') ] + [ ('warning', 'Failed getting file') ]", "= self._get_entities(catalog, entity_type) indexed_fqids.update( BundleFQID(bundle['bundleUuid'], bundle['bundleVersion']) for hit in hits", "for hit in hits]) def _get_entities(self, catalog: CatalogName, entity_type): entities", "to erase the integration test catalog, the queues are #", "for k, v in query.items()} url = furl(endpoint, path=path, query=query)", "if index else {}) for catalog in config.integration_test_catalogs ] if", "from azul.modules import ( load_app_module, ) from azul.portal_service import (", "response = requests.get(url) self.assertEqual(200, response.status_code) catalog = first(config.integration_test_catalogs.keys()) def test_local_chalice_index_endpoints(self):", "IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase): prefix_length = 2 max_bundles = 64 min_timeout =", "run # test once. Then also set `index` to False.", "@cached_property def _requests(self) -> requests.Session: return requests_session_with_retry_after() def _check_endpoint(self, endpoint:", "repository_plugin = self.azul_client.repository_plugin(catalog) drs = repository_plugin.drs_client() for access_method in AccessMethod:", "dict(catalog=catalog) if format_ is not None: params['format'] = format_ for", "self.assertTrue(lines[0].startswith(b'@')) self.assertTrue(lines[2].startswith(b'+')) def _prepare_notifications(self, catalog: CatalogName) -> Dict[BundleFQID, JSON]: bundle_fqids", "health_endpoints for path in health_paths ) for endpoint, path in", "-> bytes: query = {} if query is None else", "certain dunder methods on the variable, leading to failed #", "requests.Session: return requests_session_with_retry_after() def _check_endpoint(self, endpoint: str, path: str, query:", "first because it's more efficient, fall back to GET if", "download_bundle_metadata, ) from more_itertools import ( first, one, ) from", "in range(n_tasks) for j in range(n_ops)}) # Reset to pre-test", "response = requests.get(url) self.assertEqual(200, response.status_code) def test_local_filtered_index_endpoints(self): filters = {'genusSpecies':", ") from google.cloud import ( storage, ) from google.oauth2 import", "direct access expected = [ ('warning', 'Failed getting bundle') ]", "def setUpClass(cls) -> None: super().setUpClass() cls.bundle_uuid_prefix = ''.join([ str(random.choice('abcdef0123456789')) for", "= None, **params: Any): log.info('Beginning sub-test [%s] %r', msg, params)", "signature of FASTQ file. 
with gzip.open(BytesIO(content)) as buf: fastq =", "sys import threading import time from typing import ( AbstractSet,", "super().subTest(msg, **params): try: yield except BaseException: log.info('Failed sub-test [%s] %r',", "so # some notifications will end up being sent three", "def _test_dos(self, catalog: CatalogName, file_uuid: str): with self.subTest('dos', catalog=catalog): log.info('Resolving", "for method in ('HEAD', 'GET'): log.info('%s %s', method, access.url) #", "simultaneously modifying the portals database. \"\"\" # Currently takes about", "False. Subsequent runs will use # catalogs from first run.", "entity type %s on try #%i.', len(indexed_fqids), num_bundles, len(hits), entity_type,", "the variable, leading to failed # assertions. actual = [(m,", "Extract the log method name and the first three words", "in catalogs: log.info('Starting integration test for catalog %r with %i", "%r', msg, params) def test(self): @attr.s(auto_attribs=True, kw_only=True) class Catalog: name:", "method, access.url) # For DSS, any HTTP client should do", "# validate OpenAPI spec response = requests.get(service + '/openapi') response.raise_for_status()", "'' @cached_property def azul_client(self): return AzulClient(prefix=self.bundle_uuid_prefix) class IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase): prefix_length", "may pick the same element multiple times so # some", "OK to erase the integration test catalog, the queues are", "in catalogs: self.azul_client.index(catalog=catalog.name, notifications=catalog.notifications_with_duplicates(), delete=True) _wait_for_indexer() for catalog in catalogs:", "{ \"admin_deleted\": True } } ], \"must\": [ { \"exists\":", "('debug', 'Loading object %s'), ('warning', 'Error accessing bundle'), ('warning', 'Failed", "unittest import ( mock, ) import uuid from zipfile import", "While it's OK to erase the integration test catalog, the", "rows def _test_repository_files(self, catalog: str): with self.subTest('repository_files', catalog=catalog): file_uuid =", "\"get\", \"entity_ids\": [\"baz\"] } ], \"mock-count\": entry_format.format(thread_count, op_count) }) portal_service._crud(lambda", "= self._requests.get(url, allow_redirects=allow_redirects) expected_statuses = (200,) if allow_redirects else (200,", "https://github.com/DataBiosphere/azul/issues/2399 @unittest.skipIf(True or config.is_main_deployment(), 'Test would pollute portal DB') def", "catalog: CatalogName): \"\"\" Aside from checking that all indices exist", "= list(self.notifications.values()) # Index some bundles again to test that", "dunder methods on the variable, leading to failed # assertions.", "entity_type) indexed_fqids.update( BundleFQID(bundle['bundleUuid'], bundle['bundleVersion']) for hit in hits for bundle", "filters = {'genusSpecies': {'is': ['Homo sapiens']}} url = self.url.copy().set(path='index/files', query=dict(filters=json.dumps(filters),", "int(row[prefix + '__file_size']), row[prefix + suffix], row[prefix + '__file_name'], )", "len(metadata) else: # On `gcp` the precondition check fails right", "and third line of file (see https://en.wikipedia.org/wiki/FASTQ_format). 
self.assertTrue(lines[0].startswith(b'@')) self.assertTrue(lines[2].startswith(b'+')) def", "suffix], row[prefix + '__file_name'], ) for row in rows for", "bundle_fqids: Sequence[BundleFQID], max_bundles: int ) -> List[BundleFQID]: seed = self.pruning_seed", "[portal['mock-count'] for portal in new_db if 'mock-count' in portal] self.assertEqual(len(mock_counts),", "the precondition check fails right away, preventing any attempts of", "authenticated client. TDR does return a Bearer token in the", "{ \"exists\": { \"field\": \"files.project_json\" } }, { \"range\": {", "attempts=attempts): assert attempts > 0 params = dict(catalog=catalog) if format_", ") health_endpoints = ( config.service_endpoint(), config.indexer_endpoint() ) health_paths = (", "log.info('Failed sub-test [%s] %r', msg, params) raise else: log.info('Successful sub-test", "def _get_url(self, url: str, allow_redirects=True) -> requests.Response: log.info('GET %s', url)", "that we can instantiate a local ES client pointing at", "+ [mock_entry]) old_db = portal_service.read() with ThreadPoolExecutor(max_workers=n_threads) as executor: futures", "SpecialError(Exception): pass def _failing_s3_get_object(self): def make_mock(**kwargs): original = kwargs['spec'] def", "PortalService, ) from azul.requests import ( requests_session_with_retry_after, ) from azul.types", "def run(thread_count): for op_count in range(n_ops): mock_entry = cast(JSON, {", "deadline = time.time() + timeout while True: hits = self._get_entities(catalog,", "= azul.dss.client() self._test_dss_client(direct, query, dss_client, replica, fallback=False) class SpecialError(Exception): pass", "(see https://en.wikipedia.org/wiki/FASTQ_format). self.assertTrue(lines[0].startswith(b'@')) self.assertTrue(lines[2].startswith(b'+')) def _prepare_notifications(self, catalog: CatalogName) -> Dict[BundleFQID,", "len(filtered_bundle_fqids) < max_bundles: if self.azul_client.bundle_has_project_json(catalog, bundle_fqid): filtered_bundle_fqids.append(bundle_fqid) else: break return", "some notifications will end up being sent three or more", "range(cls.prefix_length) ]) def setUp(self) -> None: super().setUp() self.pruning_seed = random.randint(0,", "bytes: query = {} if query is None else {k:", "= 0 deadline = time.time() + timeout while True: hits", "= bundle_fqids - expected_fqids if obsolete_fqids: log.debug('Ignoring obsolete bundle versions", "'s3': mock_s3 = mock.MagicMock() mock_s3.get_object.side_effect = self.SpecialError() return mock_s3 else:", "access_method=AccessMethod.https) self.assertIsNone(access.headers) self.assertEqual('https', furl(access.url).scheme) # Try HEAD first because it's", "storage_client.download_blob_to_file(url, content) return content.getvalue() def _validate_fastq_content(self, content: bytes): # Check", "with self.subTest('other_endpoints', endpoint=endpoint, path=path): self._check_endpoint(endpoint, path) def _test_manifest(self, catalog: CatalogName):", "response = drs_client.http_client.request(method, access.url) if response.status != 403: break self.assertEqual(200,", "in rows: # Terra doesn't allow colons in this column,", "row[prefix + suffix], row[prefix + '__file_name'], ) for row in", "AzulTestCase, ) log = logging.getLogger(__name__) # noinspection PyPep8Naming def setUpModule():", "catalogs: self._assert_catalog_empty(catalog.name) self._test_other_endpoints() def _reset_indexer(self): # While it's OK to", "( ABCMeta, ) from concurrent.futures.thread import ( ThreadPoolExecutor, ) from", "azul.dss from azul.es import ( 
ESClientFactory, ) from azul.indexer import", "self.pruning_seed log.info('Selecting %i bundles with projects, out of %i candidates,", "'fastq']}} response = self._check_endpoint(endpoint=config.service_endpoint(), path='/index/files', query=dict(catalog=catalog, filters=json.dumps(filters), size=1, order='asc', sort='fileSize'))", "= self.url.copy().set(path='health').url response = requests.get(url) self.assertEqual(200, response.status_code) catalog = first(config.integration_test_catalogs.keys())", "def num_bundles(self): return len(self.notifications) @property def bundle_fqids(self) -> AbstractSet[BundleFQID]: return", "# Can't purge the queues in stable deployment as #", "manifest: self.assertIn('s3_etag', f) # Extract the log method name and", "_test_drs(self, catalog: CatalogName, file_uuid: str): repository_plugin = self.azul_client.repository_plugin(catalog) drs =", "bundles, more than the expected %i.', len(indexed_fqids), num_bundles) break elif", "str: filters = {'fileFormat': {'is': ['fastq.gz', 'fastq']}} response = self._check_endpoint(endpoint=config.service_endpoint(),", "replica: str, fallback: bool): with self.subTest(direct=direct, replica=replica, fallback=fallback): response =", "self._check_manifest, 1), ('compact', self._check_manifest, 1), ('full', self._check_manifest, 3), ('terra.bdbag', self._check_terra_bdbag,", "[ ('debug', 'Loading file %s'), ('debug', 'Loading object %s'), #", "have a # deterministic order in the input list. bundle_fqids", "furl(url=config.service_endpoint(), path=('index', entity_type), query_params=params ).url while True: response = self._get_url(url)", "entities.extend(hits) url = body['pagination']['next'] if url is None: break return", "with projects, out of %i candidates, using random seed %i.',", "def setUpModule(): configure_test_logging(log) class IntegrationTestCase(AzulTestCase, metaclass=ABCMeta): bundle_uuid_prefix: str = ''", "more times. 
notifications.extend(random.choices(notifications, k=num_duplicates)) return notifications def _wait_for_indexer(): num_bundles =", "service_account.Credentials.from_service_account_file(path) storage_client = storage.Client(credentials=credentials) content = BytesIO() storage_client.download_blob_to_file(url, content) return", "{ bundle_fqid: self.azul_client.synthesize_notification(catalog, bundle_fqid) for bundle_fqid in bundle_fqids } def", "TextIOWrapper(file) reader = csv.DictReader(text, delimiter='\\t') rows = list(reader) log.info(f'Manifest contains", "class IntegrationTestCase(AzulTestCase, metaclass=ABCMeta): bundle_uuid_prefix: str = '' @cached_property def azul_client(self):", "dss_client.post_search(es_query=query, replica=replica, per_page=10) bundle_uuid, _, bundle_version = response['results'][0]['bundle_fqid'].partition('.') with mock.patch('azul.dss.logger')", "response.json() validate_spec(spec) class DSSIntegrationTest(AzulTestCase): def test_patched_dss_client(self): query = { \"query\":", "DSSIntegrationTest(AzulTestCase): def test_patched_dss_client(self): query = { \"query\": { \"bool\": {", "self.assertEqual(old_entries, old_db) mock_counts = [portal['mock-count'] for portal in new_db if", "Mapping, Optional, Sequence, Tuple, cast, ) import unittest from unittest", "random_ = random.Random(x=seed) # The same seed should give same", "[ ('debug', 'Loading bundle %s'), ('debug', 'Loading object %s'), ('warning',", "self.assertEqual(200, response.status_code) def test_local_filtered_index_endpoints(self): filters = {'genusSpecies': {'is': ['Homo sapiens']}}", "-> Dict[BundleFQID, JSON]: bundle_fqids = self.azul_client.list_bundles(catalog) bundle_fqids = self._prune_test_bundles(catalog, bundle_fqids,", "portal_service = PortalService() entry_format = 'task={};op={}' def run(thread_count): for op_count", "'Failed getting bundle') ] + [ ('warning', 'Failed getting file')", "return filtered_bundle_fqids def _assert_catalog_complete(self, catalog: CatalogName, entity_type: str, bundle_fqids: AbstractSet[BundleFQID])", "- start) validator(catalog, response) @lru_cache(maxsize=None) def _get_one_file_uuid(self, catalog: CatalogName) ->", ") from azul.logging import ( configure_test_logging, ) from azul.modules import", "bundle_version = response['results'][0]['bundle_fqid'].partition('.') with mock.patch('azul.dss.logger') as captured_log: _, manifest, metadata", "dss_client, replica, fallback=True) dss_client = azul.dss.direct_access_client() self._test_dss_client(direct, query, dss_client, replica,", "= load_app_module('service') app_dir = os.path.dirname(app_module.__file__) factory = chalice.cli.factory.CLIFactory(app_dir) config =", "in catalogs: self._test_manifest(catalog.name) self._test_dos_and_drs(catalog.name) self._test_repository_files(catalog.name) if index and delete: for", "row[prefix + '__file_name'], ) for row in rows for prefix", "response.headers['Location'] try: retry_after = response.headers['Retry-After'] except KeyError: pass else: time.sleep(int(retry_after))", "ES client pointing at a real, remote ES domain. 
\"\"\"", "self.assertRaises(self.SpecialError): mini_dss.get_bundle(uuid, version, 'aws') with self.assertRaises(self.SpecialError): mini_dss.get_file(uuid, version, 'aws') with", "{'genusSpecies': {'is': ['Homo sapiens']}} url = self.url.copy().set(path='index/files', query=dict(filters=json.dumps(filters), catalog=self.catalog)).url response", "notifications=catalog.notifications_with_duplicates()) _wait_for_indexer() for catalog in catalogs: self._assert_catalog_complete(catalog=catalog.name, entity_type='files', bundle_fqids=catalog.bundle_fqids) for", "time.time() > deadline: log.error('Only found %i of %i bundles in", "catalogs: self.azul_client.index(catalog=catalog.name, notifications=catalog.notifications_with_duplicates(), delete=True) _wait_for_indexer() for catalog in catalogs: self._assert_catalog_empty(catalog.name)", "# desired number of bundles with project metadata. filtered_bundle_fqids =", "in under %i seconds.', len(indexed_fqids), num_bundles, timeout) break else: retries", "catalog: CatalogName, file_uuid: str): repository_plugin = self.azul_client.repository_plugin(catalog) drs = repository_plugin.drs_client()", "hits for bundle in hit.get('bundles', []) ) log.info('Detected %i of", "_check_endpoint(self, endpoint: str, path: str, query: Optional[Mapping[str, Any]] = None)", "-> AbstractSet[BundleFQID]: return self.notifications.keys() def notifications_with_duplicates(self) -> List[JSON]: num_duplicates =", "CatalogName, file_uuid: str): repository_plugin = self.azul_client.repository_plugin(catalog) drs = repository_plugin.drs_client() for", "CatalogName): for entity_type in self.entity_types: with self.subTest('catalog_empty', catalog=catalog, entity_type=entity_type): hits", "metadata. filtered_bundle_fqids = [] for bundle_fqid in bundle_fqids: if len(filtered_bundle_fqids)", "to have a # deterministic order in the input list.", "self.subTest('drs', catalog=catalog, access_method=AccessMethod.https): log.info('Resolving file %r with DRS using %r',", "def setUp(self) -> None: super().setUp() self.pruning_seed = random.randint(0, sys.maxsize) @contextmanager", "sorted(expected_fqids)) retries = 0 deadline = time.time() + timeout while", "validate_spec, ) import requests from azul import ( CatalogName, cached_property,", "Note: random.choices() may pick the same element multiple times so", "{ \"manifest.version\": { \"gte\": \"2019-04-01\" } } } ] }", "for catalog in catalogs: self._assert_catalog_complete(catalog=catalog.name, entity_type='files', bundle_fqids=catalog.bundle_fqids) for catalog in", "an # authenticated client. 
TDR does return a Bearer token", "of %i candidates, using random seed %i.', max_bundles, len(bundle_fqids), seed)", "'Loading file %s'), ('debug', 'Loading object %s'), # file ('debug',", "time.time() + timeout while True: hits = self._get_entities(catalog, entity_type) indexed_fqids.update(", ") from azul.types import ( JSON, ) from azul_test_case import", "import ( ThreadPoolExecutor, ) from contextlib import ( contextmanager, )", "if len(indexed_fqids) == num_bundles: log.info('Found the expected %i bundles.', num_bundles)", "test_get_file_fail(self): for direct in {config.dss_direct_access, False}: with self.subTest(direct=direct): dss_client =", "self.assertNotIn(':', row['entity:participant_id']) suffix = '__file_drs_uri' header, *rows = rows prefixes", "bytes): with ZipFile(BytesIO(response)) as zip_fh: data_path = os.path.join(os.path.dirname(first(zip_fh.namelist())), 'data') file_path", "2 notifications = list(self.notifications.values()) # Index some bundles again to", "Aside from checking that all indices exist this method also", "entity_type), query_params=params ).url while True: response = self._get_url(url) body =", "import azul.dss from azul.es import ( ESClientFactory, ) from azul.indexer", "furl(access.url).scheme) # Try HEAD first because it's more efficient, fall", "def test_local_filtered_index_endpoints(self): filters = {'genusSpecies': {'is': ['Homo sapiens']}} url =", "csv from functools import ( lru_cache, ) import gzip from", "config, drs, ) from azul.azulclient import ( AzulClient, AzulClientNotificationError, )", "with self.subTest('manifest', catalog=catalog, format=format_, attempts=attempts): assert attempts > 0 params", "Re-enable once overloading of S3 API is resolved # https://github.com/DataBiosphere/azul/issues/2399", "]: with self.subTest('manifest', catalog=catalog, format=format_, attempts=attempts): assert attempts > 0", "2 max_bundles = 64 min_timeout = 20 * 60 @classmethod", "( ThreadPoolExecutor, ) from contextlib import ( contextmanager, ) import", "} } self.maxDiff = None for direct in {config.dss_direct_access, False}:", "service = IndexService() for index_name in service.index_names(catalog): self.assertTrue(es_client.indices.exists(index_name)) class AzulClientIntegrationTest(IntegrationTestCase):", "64 min_timeout = 20 * 60 @classmethod def setUpClass(cls) ->", "prefixes = [ c[:-len(suffix)] for c in header.keys() if c.endswith(suffix)", "0 deadline = time.time() + timeout while True: hits =", "def _test_dss_client(self, direct: bool, query: JSON, dss_client: DSSClient, replica: str,", "as file: rows = self.__check_manifest(file, 'bundle_uuid') for row in rows:", "min( ( int(row[prefix + '__file_size']), row[prefix + suffix], row[prefix +", "= factory.create_config_obj() cls.server = factory.create_local_server(app_obj=app_module.app, config=config, host=cls.url.host, port=cls.url.port) cls.server_thread =", "log.info('Starting integration test for catalog %r with %i bundles from", "humancellatlas.data.metadata.helpers.dss import ( download_bundle_metadata, ) from more_itertools import ( first,", "requests from azul import ( CatalogName, cached_property, config, drs, )", "= dict(catalog=catalog) if format_ is not None: params['format'] = format_", "production. 
self.azul_client.reset_indexer(catalogs=config.integration_test_catalogs, # Can't purge the queues in stable deployment", "with self._failing_s3_get_object(): dss_client = azul.dss.direct_access_client() self._test_dss_client(direct, query, dss_client, replica, fallback=True)", "direct in {config.dss_direct_access, False}: for replica in 'aws', 'gcp': if", "= drs_client.http_client.request(method, access.url) if response.status != 403: break self.assertEqual(200, response.status,", "name = min( ( int(row[prefix + '__file_size']), row[prefix + suffix],", "to failed # assertions. actual = [(m, ' '.join(re.split(r'[\\s,]', a[0])[:3]))", "self.maxDiff = None for direct in {config.dss_direct_access, False}: for replica", "# shared by all catalogs and we can't afford to", "( '', # default keys for lambda '/', # all", ") log.info('Resolving %r (%r) from catalog %r (%i bytes)', drs_uri,", "buf.read(1024 * 1024) lines = fastq.splitlines() # Assert first character", "health_paths ) for endpoint, path in (*service_routes, *health_routes): with self.subTest('other_endpoints',", "entities = [] size = 100 params = dict(catalog=catalog, size=str(size))", "in bundle_fqids: if len(filtered_bundle_fqids) < max_bundles: if self.azul_client.bundle_has_project_json(catalog, bundle_fqid): filtered_bundle_fqids.append(bundle_fqid)", "(%i bytes)', drs_uri, name, catalog, size) plugin = self.azul_client.repository_plugin(catalog) drs_client", "implementations prohibits it, like Azul's DRS proxy of DSS. for", "BytesIO, TextIOWrapper, ) import json import logging import os import", "= rows[0][uuid_field_name] self.assertEqual(bundle_uuid, str(uuid.UUID(bundle_uuid))) return rows def _test_repository_files(self, catalog: str):", "{ \"integration_id\": \"bar\", \"entity_type\": \"project\", \"integration_type\": \"get\", \"entity_ids\": [\"baz\"] }", "if obsolete_fqids: log.debug('Ignoring obsolete bundle versions %r', obsolete_fqids) num_bundles =", "None: super().setUpClass() app_module = load_app_module('service') app_dir = os.path.dirname(app_module.__file__) factory =", "True delete = True if index: self._reset_indexer() catalogs: List[Catalog] =", "file %s'), ('debug', 'Loading object %s'), ('warning', 'Error accessing file'),", "class IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase): prefix_length = 2 max_bundles = 64 min_timeout", "dss_client, replica, fallback=False) else: dss_client = azul.dss.client() self._test_dss_client(direct, query, dss_client,", "endpoint, path in (*service_routes, *health_routes): with self.subTest('other_endpoints', endpoint=endpoint, path=path): self._check_endpoint(endpoint,", "name: CatalogName notifications: Mapping[BundleFQID, JSON] @property def num_bundles(self): return len(self.notifications)", "local ES client pointing at a real, remote ES domain.", "# https://github.com/DataBiosphere/azul/issues/2399 @unittest.skipIf(True or config.is_main_deployment(), 'Test would pollute portal DB')", "db file. n_threads = 10 n_tasks = n_threads * 10", "with self.assertRaises(self.SpecialError): mini_dss.get_file(uuid, version, 'aws') with self.assertRaises(self.SpecialError): mini_dss.get_native_file_url(uuid, version, 'aws')", "# default keys for lambda '/', # all keys '/basic',", "these two lines. index = True delete = True if", "hca.dss import ( DSSClient, ) from hca.util import ( SwaggerAPIException,", "catalog in catalogs: log.info('Starting integration test for catalog %r with", "three words of log # message logged. 
Note that the", "range(attempts): start = time.time() response = self._check_endpoint(config.service_endpoint(), '/manifest/files', params) log.info('Request", "bundle_fqids: AbstractSet[BundleFQID]) -> None: with self.subTest('catalog_complete', catalog=catalog): expected_fqids = set(self.azul_client.filter_obsolete_bundle_versions(bundle_fqids))", "hits for entity type %s on try #%i.', len(indexed_fqids), num_bundles,", "words of log # message logged. Note that the PyCharm", "log.debug('Ignoring obsolete bundle versions %r', obsolete_fqids) num_bundles = len(expected_fqids) timeout", "versions indexed by TDR self.assertNotIn(':', row['entity:participant_id']) suffix = '__file_drs_uri' header,", "( IndexService, ) from azul.logging import ( configure_test_logging, ) from", "sub-test [%s] %r', msg, params) def test(self): @attr.s(auto_attribs=True, kw_only=True) class", "shared by all catalogs and we can't afford to trash", "= self._get_one_file_uuid(catalog) self._test_dos(catalog, file_uuid) self._test_drs(catalog, file_uuid) @cached_property def _requests(self) ->", "max_bundles: int ) -> List[BundleFQID]: seed = self.pruning_seed log.info('Selecting %i", "file'), ('warning', 'Failed getting file') ] * len(metadata) else: expected", "from azul.indexer import ( BundleFQID, ) from azul.indexer.index_service import (", "%i bundles from prefix %r.', catalog, catalog.num_bundles, self.bundle_uuid_prefix) self.azul_client.index(catalog=catalog.name, notifications=catalog.notifications_with_duplicates())", "attempt in range(attempts): start = time.time() response = self._check_endpoint(config.service_endpoint(), '/manifest/files',", "@cached_property def azul_client(self): return AzulClient(prefix=self.bundle_uuid_prefix) class IndexingIntegrationTest(IntegrationTestCase, AlwaysTearDownTestCase): prefix_length =", "CatalogName, response: bytes): self.__check_manifest(BytesIO(response), 'bundle_uuid') def _check_terra_bdbag(self, catalog: CatalogName, response:", "len(indexed_fqids) > num_bundles: log.error('Found %i bundles, more than the expected", "else: # On `gcp` the precondition check fails right away,", "url = furl(endpoint, path=path, query=query) return self._get_url_content(url.url) def _get_url_content(self, url:", "host=cls.url.host, port=cls.url.port) cls.server_thread = threading.Thread(target=cls.server.serve_forever) cls.server_thread.start() @classmethod def tearDownClass(cls) ->", "20 * 60 @classmethod def setUpClass(cls) -> None: super().setUpClass() cls.bundle_uuid_prefix", "@lru_cache(maxsize=None) def _get_one_file_uuid(self, catalog: CatalogName) -> str: filters = {'fileFormat':", "for bundle_fqid in bundle_fqids } def _prune_test_bundles(self, catalog: CatalogName, bundle_fqids:", "with ThreadPoolExecutor(max_workers=n_threads) as executor: futures = [executor.submit(run, i) for i", "'/progress', '/api_endpoints', '/other_lambdas' ) health_routes = ( (endpoint, '/health' +", "any attempts of direct access expected = [ ('warning', 'Failed", "self._get_entities(catalog, entity_type) indexed_fqids.update( BundleFQID(bundle['bundleUuid'], bundle['bundleVersion']) for hit in hits for", "this token is the same as # the one we're", "expected %i.', len(indexed_fqids), num_bundles) break elif time.time() > deadline: log.error('Only", "_, manifest, metadata = download_bundle_metadata(client=dss_client, replica=replica, uuid=bundle_uuid, version=bundle_version, num_workers=config.num_dss_workers) log.info('Captured", "import ( ABCMeta, ) from concurrent.futures.thread import ( 
    ThreadPoolExecutor,
)
from contextlib import (
    contextmanager,
)
import csv
from functools import (
    lru_cache,
)
import gzip
from io import (
    BytesIO,
    TextIOWrapper,
)
import json
import logging
import os
import random
import re
import sys
import threading
import time
from typing import (
    AbstractSet,
    Any,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    cast,
)
import unittest
from unittest import (
    mock,
)
import uuid
from zipfile import (
    ZipFile,
)

import attr
import chalice.cli
from furl import (
    furl,
)
from google.cloud import (
    storage,
)
from google.oauth2 import (
    service_account,
)
from hca.dss import (
    DSSClient,
)
from hca.util import (
    SwaggerAPIException,
)
from humancellatlas.data.metadata.helpers.dss import (
    download_bundle_metadata,
)
from more_itertools import (
    first,
    one,
)
from openapi_spec_validator import (
    validate_spec,
)
import requests

from azul import (
    CatalogName,
    cached_property,
    config,
    drs,
)
from azul.azulclient import (
    AzulClient,
    AzulClientNotificationError,
)
from azul.drs import (
    AccessMethod,
)
import azul.dss
from azul.es import (
    ESClientFactory,
)
from azul.indexer import (
    BundleFQID,
)
from azul.indexer.index_service import (
    IndexService,
)
from azul.logging import (
    configure_test_logging,
)
from azul.modules import (
    load_app_module,
)
from azul.portal_service import (
    PortalService,
)
from azul.requests import (
    requests_session_with_retry_after,
)
from azul.types import (
    JSON,
)
from azul_test_case import (
    AlwaysTearDownTestCase,
    AzulTestCase,
)

log = logging.getLogger(__name__)


# noinspection PyPep8Naming
def setUpModule():
    configure_test_logging(log)


class IntegrationTestCase(AzulTestCase, metaclass=ABCMeta):
    bundle_uuid_prefix: str = ''

    @cached_property
    def azul_client(self):
        return AzulClient(prefix=self.bundle_uuid_prefix)

    def setUp(self) -> None:
        super().setUp()
        self.pruning_seed = random.randint(0, sys.maxsize)

    @contextmanager
    def subTest(self, msg: Any = None, **params: Any):
        log.info('Beginning sub-test [%s] %r', msg, params)
        with super().subTest(msg, **params):
            yield


class AzulClientIntegrationTest(IntegrationTestCase):

    def test_azul_client_error_handling(self):
        invalid_notification = {}
        notifications = [invalid_notification]
        self.assertRaises(AzulClientNotificationError,
                          self.azul_client.index,
                          first(config.integration_test_catalogs),
                          notifications)


class PortalRegistrationIntegrationTest(IntegrationTestCase):

    # FIXME: ... of S3 API is resolved
    #        https://github.com/DataBiosphere/azul/issues/2399
    @unittest.skipIf(True or config.is_main_deployment(), '...')
    def test_concurrent_portal_db_crud(self):
        """
        Use multithreading to simulate multiple users simultaneously modifying
        the portals database.
        """
        # Currently ... seconds and creates a 25 kb db file.
        n_threads = 10
        n_tasks = n_threads * 10
        n_ops = 5
        portal_service = PortalService()
        entry_format = 'task={};op={}'

        def run(thread_count):
            for op_count in range(n_ops):
                mock_entry = cast(JSON, {
                    "portal_id": "foo",
                    "integrations": [
                        {
                            "integration_id": "bar",
                            "entity_type": "project",
                            "integration_type": "get",
                            "entity_ids": ["baz"]
                        }
                    ],
                    "mock-count": entry_format.format(thread_count, op_count)
                })
                portal_service._crud(lambda db: list(db) + [mock_entry])

        old_db = portal_service.read()

        with ThreadPoolExecutor(max_workers=n_threads) as executor:
            futures = [executor.submit(run, i) for i in range(n_tasks)]
            self.assertTrue(all(f.result() is None for f in futures))

        new_db = portal_service.read()

        old_entries = [portal for portal in new_db if 'mock-count' not in portal]
        self.assertEqual(old_entries, old_db)
        mock_counts = [portal['mock-count'] for portal in new_db if 'mock-count' in portal]
        self.assertEqual(len(mock_counts), len(set(mock_counts)))
        self.assertEqual(set(mock_counts),
                         {entry_format.format(i, j)
                          for i in range(n_tasks)
                          for j in range(n_ops)})

        # Reset to pre-test state.
        portal_service.overwrite(old_db)


class OpenAPIIntegrationTest(AzulTestCase):

    def test_openapi(self):
        service = config.service_endpoint()
        response = requests.get(service + '/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['content-type'], 'text/html')
        self.assertGreater(len(response.content), 0)
        # validate OpenAPI spec
        response = requests.get(service + '/openapi')
        response.raise_for_status()
        spec = response.json()
        validate_spec(spec)


class DSSIntegrationTest(AzulTestCase):

    class SpecialError(Exception):
        pass

    def _failing_s3_get_object(self):
        def make_mock(**kwargs):
            original = kwargs['spec']

            def mock_boto3_client(service, *args, **kwargs):
                if service == 's3':
                    mock_s3 = mock.MagicMock()
                    mock_s3.get_object.side_effect = self.SpecialError()
                    return mock_s3
                else:
                    return original(service, *args, **kwargs)

            return mock_boto3_client

        return mock.patch('azul.deployment.aws.client', spec=True, new_callable=make_mock)

    def test_mini_dss_failures(self):
        uuid = 'acafefed-beef-4bad-babe-feedfa11afe1'
        version = '2018-11-19T232756.056947Z'
        with self._failing_s3_get_object():
            mini_dss = azul.dss.MiniDSS(config.dss_endpoint)
            with self.assertRaises(self.SpecialError):
                mini_dss._get_file_object(uuid, version)
            with self.assertRaises(KeyError):
                mini_dss._get_blob_key({})
            with self.assertRaises(self.SpecialError):
                mini_dss.get_file(uuid, version, 'aws')
            with self.assertRaises(self.SpecialError):
                mini_dss.get_native_file_url(uuid, version, 'aws')
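# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test suite: the concurrency
# test above fans tasks out over a ThreadPoolExecutor and collects every
# Future.result() so that an exception raised inside a worker propagates to
# the test. The helper below shows that pattern in isolation; its name and
# the trivial task are hypothetical.
# ---------------------------------------------------------------------------
def _example_fan_out_and_collect(n_tasks: int = 10, n_workers: int = 4) -> None:
    from concurrent.futures import ThreadPoolExecutor

    def task(i: int) -> int:
        # Stand-in for one real unit of work, e.g. a single portal DB update.
        return i * i

    with ThreadPoolExecutor(max_workers=n_workers) as executor:
        futures = [executor.submit(task, i) for i in range(n_tasks)]
        # .result() blocks until the task finishes and re-raises its exception.
        results = [f.result() for f in futures]
    assert sorted(results) == [i * i for i in range(n_tasks)]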
[ "id: str :param custom_query_name: Name of the custom query :type", "updated :type data_transformation: dict | bytes :rtype: DataTransformation \"\"\" if", "from openapi_server import util def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None): # noqa:", "information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param username: Name of", "query :type username: str :rtype: List[DataTransformation] \"\"\" return query_manager.get_resource(id=id, custom_query_name=custom_query_name,", "Name of the user graph to query :type username: str", "DataTransformation Updates an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) #", "data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501 return query_manager.put_resource(id=id, user=user, body=data_transformation,", "query_manager.get_resource( username=username, label=label, page=page, per_page=per_page, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_delete(id,", "the DataTransformationto be created :type data_transformation: dict | bytes :rtype:", "(more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param username: Name", "single DataTransformation by its id Gets the details of a", "related a dataset Gets a list of data transformations related", "retrieved :type id: str :param username: Name of the user", "datatransformations_id_delete(id, user=None): # noqa: E501 \"\"\"Delete an existing DataTransformation Delete", "datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501 \"\"\"Update an existing DataTransformation", "query_manager.delete_resource(id=id, user=user, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_get(id, username=None): # noqa:", "transformations related a dataset # noqa: E501 :param id: The", "id: The ID of the dataspecification :type id: str :param", "a list of data transformations related a dataset Gets a", "kls=DataTransformation) def datatransformations_id_get(id, username=None): # noqa: E501 \"\"\"Get a single", "DataTransformation # noqa: E501 from openapi_server import util def custom_datasetspecifications_id_datatransformations_get(id,", "be retrieved :type id: str :param user: Username :type user:", "= DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501 return query_manager.post_resource( user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI,", "E501 \"\"\"List all instances of DataTransformation Gets a list of", "a single DataTransformation by its id Gets the details of", "Username :type user: str :rtype: None \"\"\" return query_manager.delete_resource(id=id, user=user,", "Page number :type page: int :param per_page: Items per page", "return query_manager.put_resource(id=id, user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_post(user=None, data_transformation=None):", "user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_post(user=None, data_transformation=None): # noqa:", "# noqa: E501 :param id: The ID of the dataspecification", "import connexion import six from 
openapi_server import query_manager from openapi_server.utils.vars", "username=username, label=label, page=page, per_page=per_page, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_delete(id, user=None):", "dataspecification :type id: str :param custom_query_name: Name of the custom", "str :rtype: List[DataTransformation] \"\"\" return query_manager.get_resource(id=id, custom_query_name=custom_query_name, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME,", "user=user, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_get(id, username=None): # noqa: E501", "https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param id: The ID of the", "instances of DataTransformation Gets a list of all instances of", "E501 from openapi_server import util def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None): #", ":param username: Username to query :type username: str :rtype: List[DataTransformation]", "of the DataTransformation to be retrieved :type id: str :param", "user: str :param data_transformation: Information about the DataTransformationto be created", "new instance of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa:", "# noqa: E501 \"\"\"Delete an existing DataTransformation Delete an existing", "user: Username :type user: str :param data_transformation: Information about the", "page=page, per_page=per_page, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_delete(id, user=None): # noqa:", "import util def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None): # noqa: E501 \"\"\"Gets", "from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI from openapi_server.models.data_transformation import DataTransformation #", "DATATRANSFORMATION_TYPE_URI from openapi_server.models.data_transformation import DataTransformation # noqa: E501 from openapi_server", "custom_query_name=None, username=None): # noqa: E501 \"\"\"Gets a list of data", "rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501 \"\"\"Update", "Username to query :type username: str :rtype: List[DataTransformation] \"\"\" return", "DataTransformation to be retrieved :type id: str :param username: Name", "return query_manager.get_resource(id=id, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_put(id, user=None, data_transformation=None):", "import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI from openapi_server.models.data_transformation import DataTransformation # noqa: E501", "an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501", ":type user: str :param data_transformation: Information about the DataTransformationto be", "custom_query_name: Name of the custom query :type custom_query_name: str :param", "kls=DataTransformation) def datatransformations_id_delete(id, user=None): # noqa: E501 \"\"\"Delete an existing", ":type custom_query_name: str :param username: Username to 
query :type username:", "to query :type username: str :rtype: DataTransformation \"\"\" return query_manager.get_resource(id=id,", "# noqa: E501 \"\"\"Get a single DataTransformation by its id", "details of a given DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) #", ":type label: str :param page: Page number :type page: int", "list of data transformations related a dataset # noqa: E501", "DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param id:", "datatransformations_id_get(id, username=None): # noqa: E501 \"\"\"Get a single DataTransformation by", "noqa: E501 \"\"\"Delete an existing DataTransformation Delete an existing DataTransformation", "to query :type username: str :param label: Filter by label", "DataTransformationto be created :type data_transformation: dict | bytes :rtype: DataTransformation", "connexion.request.is_json: data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501 return query_manager.put_resource(id=id, user=user,", "str :param data_transformation: Information about the DataTransformationto be created :type", ":param data_transformation: An old DataTransformationto be updated :type data_transformation: dict", "about the DataTransformationto be created :type data_transformation: dict | bytes", ":type username: str :rtype: DataTransformation \"\"\" return query_manager.get_resource(id=id, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI,", ":param username: Name of the user graph to query :type", "Filter by label :type label: str :param page: Page number", "kls=DataTransformation) def datatransformations_get(username=None, label=None, page=None, per_page=None): # noqa: E501 \"\"\"List", ":param data_transformation: Information about the DataTransformationto be created :type data_transformation:", "of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param", "label=None, page=None, per_page=None): # noqa: E501 \"\"\"List all instances of", "return query_manager.get_resource(id=id, custom_query_name=custom_query_name, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_get(username=None, label=None,", "rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501", "DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501 return query_manager.post_resource( user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME,", "Gets a list of data transformations related a dataset #", "query_manager.get_resource(id=id, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_put(id, user=None, data_transformation=None): #", ":type username: str :rtype: List[DataTransformation] \"\"\" return query_manager.get_resource(id=id, custom_query_name=custom_query_name, username=username,", "E501 \"\"\"Gets a list of data transformations related a dataset", "label: Filter by label :type label: str :param page: Page", "per_page: Items per page :type per_page: int :rtype: List[DataTransformation] \"\"\"", "instances of DataTransformation (more information in 
https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501", "data_transformation=None): # noqa: E501 \"\"\"Update an existing DataTransformation Updates an", ":param user: Username :type user: str :param data_transformation: Information about", "an existing DataTransformation Delete an existing DataTransformation (more information in", "body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_post(user=None, data_transformation=None): # noqa: E501", "existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param", "data_transformation: dict | bytes :rtype: DataTransformation \"\"\" if connexion.request.is_json: data_transformation", "user graph to query :type username: str :rtype: DataTransformation \"\"\"", "return query_manager.get_resource( username=username, label=label, page=page, per_page=per_page, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def", "\"\"\"List all instances of DataTransformation Gets a list of all", "DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501 return query_manager.put_resource(id=id, user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME,", "# noqa: E501 \"\"\"List all instances of DataTransformation Gets a", "transformations related a dataset Gets a list of data transformations", "\"\"\" if connexion.request.is_json: data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501 return", "query_manager.put_resource(id=id, user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_post(user=None, data_transformation=None): #", "a list of all instances of DataTransformation (more information in", "DataTransformation Create a new instance of DataTransformation (more information in", "in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param user: Username :type user:", "user: str :rtype: None \"\"\" return query_manager.delete_resource(id=id, user=user, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME,", "# noqa: E501 :param username: Name of the user graph", "= DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501 return query_manager.put_resource(id=id, user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI,", "list of data transformations related a dataset Gets a list", "datatransformations_post(user=None, data_transformation=None): # noqa: E501 \"\"\"Create one DataTransformation Create a", "be created :type data_transformation: dict | bytes :rtype: DataTransformation \"\"\"", "DataTransformationto be updated :type data_transformation: dict | bytes :rtype: DataTransformation", "id: str :param username: Name of the user graph to", "str :param username: Name of the user graph to query", "a dataset # noqa: E501 :param id: The ID of", "DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param username:", "# noqa: E501 :param id: The ID of the DataTransformation", "existing DataTransformation Updates an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation)", "query :type username: str :rtype: DataTransformation \"\"\" return 
query_manager.get_resource(id=id, username=username,", "existing DataTransformation Delete an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation)", ":type data_transformation: dict | bytes :rtype: DataTransformation \"\"\" if connexion.request.is_json:", "dataset Gets a list of data transformations related a dataset", "rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_get(username=None, label=None, page=None, per_page=None): # noqa:", "DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI from openapi_server.models.data_transformation import DataTransformation # noqa: E501 from", "the dataspecification :type id: str :param custom_query_name: Name of the", "of data transformations related a dataset Gets a list of", "return query_manager.delete_resource(id=id, user=user, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_get(id, username=None): #", "page=None, per_page=None): # noqa: E501 \"\"\"List all instances of DataTransformation", "str :param page: Page number :type page: int :param per_page:", "DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param user:", "str :param data_transformation: An old DataTransformationto be updated :type data_transformation:", "custom_query_name: str :param username: Username to query :type username: str", "the custom query :type custom_query_name: str :param username: Username to", "rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_get(username=None, label=None, page=None, per_page=None): # noqa: E501", "data_transformation: An old DataTransformationto be updated :type data_transformation: dict |", "The ID of the DataTransformation to be retrieved :type id:", "E501 :param user: Username :type user: str :param data_transformation: Information", "str :param username: Username to query :type username: str :rtype:", "noqa: E501 return query_manager.put_resource(id=id, user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def", "page: int :param per_page: Items per page :type per_page: int", "user: str :param data_transformation: An old DataTransformationto be updated :type", "username=None): # noqa: E501 \"\"\"Gets a list of data transformations", "def datatransformations_post(user=None, data_transformation=None): # noqa: E501 \"\"\"Create one DataTransformation Create", "of DataTransformation Gets a list of all instances of DataTransformation", "str :param custom_query_name: Name of the custom query :type custom_query_name:", "of the custom query :type custom_query_name: str :param username: Username", "str :rtype: DataTransformation \"\"\" return query_manager.get_resource(id=id, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation)", "query_manager.get_resource(id=id, custom_query_name=custom_query_name, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_get(username=None, label=None, page=None,", "the DataTransformation to be retrieved :type id: str :param username:", "E501 \"\"\"Update an existing DataTransformation Updates an existing DataTransformation (more", ":param id: The ID of 
the dataspecification :type id: str", "data_transformation=None): # noqa: E501 \"\"\"Create one DataTransformation Create a new", "Gets a list of all instances of DataTransformation (more information", "# noqa: E501 \"\"\"Gets a list of data transformations related", "from openapi_server.models.data_transformation import DataTransformation # noqa: E501 from openapi_server import", "An old DataTransformationto be updated :type data_transformation: dict | bytes", "rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_delete(id, user=None): # noqa: E501 \"\"\"Delete an", ":rtype: List[DataTransformation] \"\"\" return query_manager.get_resource(id=id, custom_query_name=custom_query_name, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation)", "bytes :rtype: DataTransformation \"\"\" if connexion.request.is_json: data_transformation = DataTransformation.from_dict(connexion.request.get_json()) #", "username: str :rtype: DataTransformation \"\"\" return query_manager.get_resource(id=id, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME,", "per_page=None): # noqa: E501 \"\"\"List all instances of DataTransformation Gets", "Items per page :type per_page: int :rtype: List[DataTransformation] \"\"\" return", "six from openapi_server import query_manager from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI", "a given DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501", "user=None, data_transformation=None): # noqa: E501 \"\"\"Update an existing DataTransformation Updates", ":type page: int :param per_page: Items per page :type per_page:", "noqa: E501 \"\"\"Get a single DataTransformation by its id Gets", "query :type username: str :param label: Filter by label :type", "page :type per_page: int :rtype: List[DataTransformation] \"\"\" return query_manager.get_resource( username=username,", ":rtype: List[DataTransformation] \"\"\" return query_manager.get_resource( username=username, label=label, page=page, per_page=per_page, rdf_type_uri=DATATRANSFORMATION_TYPE_URI,", "# noqa: E501 :param user: Username :type user: str :param", "of data transformations related a dataset # noqa: E501 :param", "def datatransformations_id_delete(id, user=None): # noqa: E501 \"\"\"Delete an existing DataTransformation", ":param label: Filter by label :type label: str :param page:", "username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_get(username=None, label=None, page=None, per_page=None): #", "\"\"\" return query_manager.get_resource(id=id, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_put(id, user=None,", "list of all instances of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation)", ":param id: The ID of the DataTransformation to be retrieved", "kls=DataTransformation) def datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501 \"\"\"Update an", "user=None): # noqa: E501 \"\"\"Delete an existing DataTransformation Delete an", "openapi_server import query_manager from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI from 
openapi_server.models.data_transformation", "the DataTransformation to be retrieved :type id: str :param user:", "str :rtype: None \"\"\" return query_manager.delete_resource(id=id, user=user, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation)", "noqa: E501 :param username: Name of the user graph to", "str :param user: Username :type user: str :rtype: None \"\"\"", "openapi_server.models.data_transformation import DataTransformation # noqa: E501 from openapi_server import util", ":type username: str :param label: Filter by label :type label:", "\"\"\"Update an existing DataTransformation Updates an existing DataTransformation (more information", "ID of the dataspecification :type id: str :param custom_query_name: Name", "rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_post(user=None, data_transformation=None): # noqa: E501 \"\"\"Create one", ":rtype: DataTransformation \"\"\" return query_manager.get_resource(id=id, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def", "rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_get(id, username=None): # noqa: E501 \"\"\"Get", "query_manager from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI from openapi_server.models.data_transformation import DataTransformation", "user: Username :type user: str :rtype: None \"\"\" return query_manager.delete_resource(id=id,", "Delete an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa:", "per page :type per_page: int :rtype: List[DataTransformation] \"\"\" return query_manager.get_resource(", "E501 \"\"\"Create one DataTransformation Create a new instance of DataTransformation", "DataTransformation \"\"\" if connexion.request.is_json: data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501", "an existing DataTransformation Updates an existing DataTransformation (more information in", "number :type page: int :param per_page: Items per page :type", "# noqa: E501 \"\"\"Create one DataTransformation Create a new instance", "int :param per_page: Items per page :type per_page: int :rtype:", "per_page: int :rtype: List[DataTransformation] \"\"\" return query_manager.get_resource( username=username, label=label, page=page,", "noqa: E501 \"\"\"Create one DataTransformation Create a new instance of", "custom_query_name=custom_query_name, username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_get(username=None, label=None, page=None, per_page=None):", "its id Gets the details of a given DataTransformation (more", "the details of a given DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation)", "import query_manager from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI from openapi_server.models.data_transformation import", "int :rtype: List[DataTransformation] \"\"\" return query_manager.get_resource( username=username, label=label, page=page, per_page=per_page,", "per_page=per_page, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_delete(id, user=None): # noqa: E501", 
"related a dataset # noqa: E501 :param id: The ID", ":type id: str :param user: Username :type user: str :param", "connexion import six from openapi_server import query_manager from openapi_server.utils.vars import", "str :param user: Username :type user: str :param data_transformation: An", "openapi_server import util def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None): # noqa: E501", "E501 :param username: Name of the user graph to query", "page: Page number :type page: int :param per_page: Items per", "Username :type user: str :param data_transformation: Information about the DataTransformationto", ":param page: Page number :type page: int :param per_page: Items", "E501 return query_manager.put_resource(id=id, user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_post(user=None,", "custom query :type custom_query_name: str :param username: Username to query", "Name of the custom query :type custom_query_name: str :param username:", "id: str :param user: Username :type user: str :param data_transformation:", "(more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param user: Username", "datatransformations_get(username=None, label=None, page=None, per_page=None): # noqa: E501 \"\"\"List all instances", "Information about the DataTransformationto be created :type data_transformation: dict |", "https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501 :param username: Name of the user", "rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_delete(id, user=None): # noqa: E501 \"\"\"Delete", "kls=DataTransformation) def datatransformations_post(user=None, data_transformation=None): # noqa: E501 \"\"\"Create one DataTransformation", "def datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501 \"\"\"Update an existing", "a list of data transformations related a dataset # noqa:", "DataTransformation by its id Gets the details of a given", "Gets the details of a given DataTransformation (more information in", "username=username, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_put(id, user=None, data_transformation=None): # noqa:", "graph to query :type username: str :param label: Filter by", "graph to query :type username: str :rtype: DataTransformation \"\"\" return", "None \"\"\" return query_manager.delete_resource(id=id, user=user, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation) def datatransformations_id_get(id,", "of all instances of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) #", "E501 :param id: The ID of the dataspecification :type id:", "E501 \"\"\"Delete an existing DataTransformation Delete an existing DataTransformation (more", "# noqa: E501 return query_manager.post_resource( user=user, body=data_transformation, rdf_type_uri=DATATRANSFORMATION_TYPE_URI, rdf_type_name=DATATRANSFORMATION_TYPE_NAME, kls=DataTransformation)", "id Gets the details of a given DataTransformation (more information", "instance of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501", "by label :type label: str :param page: Page number :type", "\"\"\" return 
import connexion
import six

from openapi_server import query_manager
from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI

from openapi_server.models.data_transformation import DataTransformation  # noqa: E501
from openapi_server import util


def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None):  # noqa: E501
    """Gets a list of data transformations related to a dataset

    Gets a list of data transformations related to a dataset # noqa: E501

    :param id: The ID of the dataspecification
    :type id: str
    :param custom_query_name: Name of the custom query
    :type custom_query_name: str
    :param username: Username to query
    :type username: str

    :rtype: List[DataTransformation]
    """
    return query_manager.get_resource(
        id=id,
        custom_query_name=custom_query_name,
        username=username,
        rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
        rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
        kls=DataTransformation)


def datatransformations_get(username=None, label=None, page=None, per_page=None):  # noqa: E501
    """List all instances of DataTransformation

    Gets a list of all instances of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501

    :param username: Name of the user graph to query
    :type username: str
    :param label: Filter by label
    :type label: str
    :param page: Page number
    :type page: int
    :param per_page: Items per page
    :type per_page: int

    :rtype: List[DataTransformation]
    """
    return query_manager.get_resource(
        username=username,
        label=label,
        page=page,
        per_page=per_page,
        rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
        rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
        kls=DataTransformation)


def datatransformations_id_delete(id, user=None):  # noqa: E501
    """Delete an existing DataTransformation

    Delete an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501

    :param id: The ID of the DataTransformation to be retrieved
    :type id: str
    :param user: Username
    :type user: str

    :rtype: None
    """
    return query_manager.delete_resource(
        id=id,
        user=user,
        rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
        rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
        kls=DataTransformation)


def datatransformations_id_get(id, username=None):  # noqa: E501
    """Get a single DataTransformation by its id

    Gets the details of a given DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501

    :param id: The ID of the DataTransformation to be retrieved
    :type id: str
    :param username: Name of the user graph to query
    :type username: str

    :rtype: DataTransformation
    """
    return query_manager.get_resource(
        id=id,
        username=username,
        rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
        rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
        kls=DataTransformation)


def datatransformations_id_put(id, user=None, data_transformation=None):  # noqa: E501
    """Update an existing DataTransformation

    Updates an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501

    :param id: The ID of the DataTransformation to be retrieved
    :type id: str
    :param user: Username
    :type user: str
    :param data_transformation: An old DataTransformation to be updated
    :type data_transformation: dict | bytes

    :rtype: DataTransformation
    """
    if connexion.request.is_json:
        data_transformation = DataTransformation.from_dict(connexion.request.get_json())  # noqa: E501
    return query_manager.put_resource(
        id=id,
        user=user,
        body=data_transformation,
        rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
        rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
        kls=DataTransformation)


def datatransformations_post(user=None, data_transformation=None):  # noqa: E501
    """Create one DataTransformation

    Create a new instance of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501

    :param user: Username
    :type user: str
    :param data_transformation: Information about the DataTransformation to be created
    :type data_transformation: dict | bytes

    :rtype: DataTransformation
    """
    if connexion.request.is_json:
        data_transformation = DataTransformation.from_dict(connexion.request.get_json())  # noqa: E501
    return query_manager.post_resource(
        user=user,
        body=data_transformation,
        rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
        rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
        kls=DataTransformation)
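# --- Illustrative sketch, not part of the recovered controller above ---
# The handlers in this module are plain functions; a connexion application maps
# HTTP routes onto them through an OpenAPI spec whose operationIds reference this
# module. The launcher below is a minimal sketch of that wiring: the spec
# directory 'openapi/', the file name 'openapi.yaml', and the port are assumptions
# made for illustration and are not taken from the recovered code.
if __name__ == '__main__':
    app = connexion.App(__name__, specification_dir='openapi/')
    app.add_api('openapi.yaml')  # routes resolve to the handler functions defined above
    app.run(port=8080)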
[ "x # features) feature_names : list Names of the features", "int Index of the feature to plot. shap_values : numpy.array", "\"...\" + text[-int(max_len/2)+1:] else: return text def monitoring_plot(ind, shap_values, features,", "plot is meant to display the behavior of a model", "+ \"\\nSHAP value\", size=13) pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) cb =", "features.columns features = features.values pl.figure(figsize=(10,3)) ys = shap_values[:,ind] xs =", "= pl.colorbar() cb.outline.set_visible(False) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7) *", "inc): #stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\") stat, pval =", "given to this plot explain the loss of a model,", "help in monitoring the model's performance. Parameters ---------- ind :", "Matrix of SHAP values (# samples x # features) features", "None: feature_names = features.columns features = features.values pl.figure(figsize=(10,3)) ys =", "12*2, len(ys)) pvals = [] inc = 50 for i", "SHAP monitoring plot is meant to display the behavior of", "not be loaded!\") pass from . import labels from .", "> max_len: return text[:int(max_len/2)-2] + \"...\" + text[-int(max_len/2)+1:] else: return", "x # features) features : numpy.array or pandas.DataFrame Matrix of", "# features) feature_names : list Names of the features (length", "alternative=\"two-sided\") stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:]) pvals.append(pval) min_pval = np.min(pvals)", "+ inc if min_pval < 0.05 / shap_values.shape[1]: pl.axvline(min_pval_ind, linestyle=\"dashed\",", "size=13) pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) cb = pl.colorbar() cb.outline.set_visible(False) bbox", "the loss of a model, so changes in a feature's", "of feature values (# samples x # features) feature_names :", "feature's impact on the model's loss over time can help", "of SHAP values (# samples x # features) features :", "import scipy import warnings try: import matplotlib.pyplot as pl import", "performance. Parameters ---------- ind : int Index of the feature", "pvals = [] inc = 50 for i in range(inc,", "inc if min_pval < 0.05 / shap_values.shape[1]: pl.axvline(min_pval_ind, linestyle=\"dashed\", color=\"#666666\",", "from . import colors def truncate_text(text, max_len): if len(text) >", "feature_names = features.columns features = features.values pl.figure(figsize=(10,3)) ys = shap_values[:,ind]", "import matplotlib.pyplot as pl import matplotlib except ImportError: warnings.warn(\"matplotlib could", "Create a SHAP monitoring plot. (Note this function is preliminary", "scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\") stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:]) pvals.append(pval) min_pval", "as np import scipy import warnings try: import matplotlib.pyplot as", "if feature_names is None: feature_names = features.columns features = features.values", "in a feature's impact on the model's loss over time", "the model's loss over time can help in monitoring the", "preliminary and subject to change!!) 
A SHAP monitoring plot is", "i in range(inc, len(ys)-inc, inc): #stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:],", "= features.columns features = features.values pl.figure(figsize=(10,3)) ys = shap_values[:,ind] xs", "s=10, c=features[:,ind], cmap=colors.red_blue) pl.xlabel(\"Sample index\") pl.ylabel(truncate_text(feature_names[ind], 30) + \"\\nSHAP value\",", "numpy as np import scipy import warnings try: import matplotlib.pyplot", "cmap=colors.red_blue) pl.xlabel(\"Sample index\") pl.ylabel(truncate_text(feature_names[ind], 30) + \"\\nSHAP value\", size=13) pl.gca().xaxis.set_ticks_position('bottom')", "labels from . import colors def truncate_text(text, max_len): if len(text)", "= np.argmin(pvals)*inc + inc if min_pval < 0.05 / shap_values.shape[1]:", "on the model's loss over time can help in monitoring", "ind : int Index of the feature to plot. shap_values", "the feature to plot. shap_values : numpy.array Matrix of SHAP", "as pl import matplotlib except ImportError: warnings.warn(\"matplotlib could not be", "text def monitoring_plot(ind, shap_values, features, feature_names=None): \"\"\" Create a SHAP", "features = features.values pl.figure(figsize=(10,3)) ys = shap_values[:,ind] xs = np.arange(len(ys))#np.linspace(0,", "pl.gca().spines['top'].set_visible(False) cb = pl.colorbar() cb.outline.set_visible(False) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height -", "to this plot explain the loss of a model, so", "and subject to change!!) A SHAP monitoring plot is meant", "def monitoring_plot(ind, shap_values, features, feature_names=None): \"\"\" Create a SHAP monitoring", "max_len): if len(text) > max_len: return text[:int(max_len/2)-2] + \"...\" +", "colors def truncate_text(text, max_len): if len(text) > max_len: return text[:int(max_len/2)-2]", "time can help in monitoring the model's performance. Parameters ----------", "of the features (length # features) \"\"\" if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if", "feature_names is None: feature_names = features.columns features = features.values pl.figure(figsize=(10,3))", "feature_names=None): \"\"\" Create a SHAP monitoring plot. (Note this function", ". 
import colors def truncate_text(text, max_len): if len(text) > max_len:", "is meant to display the behavior of a model over", "model's loss over time can help in monitoring the model's", "numpy.array Matrix of SHAP values (# samples x # features)", "value\", size=13) pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) cb = pl.colorbar() cb.outline.set_visible(False)", "matplotlib except ImportError: warnings.warn(\"matplotlib could not be loaded!\") pass from", "this plot explain the loss of a model, so changes", "min_pval_ind = np.argmin(pvals)*inc + inc if min_pval < 0.05 /", "inc = 50 for i in range(inc, len(ys)-inc, inc): #stat,", "monitoring_plot(ind, shap_values, features, feature_names=None): \"\"\" Create a SHAP monitoring plot.", "alpha=0.2) pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue) pl.xlabel(\"Sample index\") pl.ylabel(truncate_text(feature_names[ind], 30)", "pl.colorbar() cb.outline.set_visible(False) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7) * 20)", "values (# samples x # features) feature_names : list Names", "= features.values pl.figure(figsize=(10,3)) ys = shap_values[:,ind] xs = np.arange(len(ys))#np.linspace(0, 12*2,", "return text[:int(max_len/2)-2] + \"...\" + text[-int(max_len/2)+1:] else: return text def", "monitoring plot is meant to display the behavior of a", "feature to plot. shap_values : numpy.array Matrix of SHAP values", "model, so changes in a feature's impact on the model's", "np.argmin(pvals)*inc + inc if min_pval < 0.05 / shap_values.shape[1]: pl.axvline(min_pval_ind,", "try: import matplotlib.pyplot as pl import matplotlib except ImportError: warnings.warn(\"matplotlib", "pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\") stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])", "a feature's impact on the model's loss over time can", "shap_values, features, feature_names=None): \"\"\" Create a SHAP monitoring plot. (Note", "the behavior of a model over time. Often the shap_values", "feature_names : list Names of the features (length # features)", "# features) \"\"\" if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if feature_names is None: feature_names", "cb = pl.colorbar() cb.outline.set_visible(False) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7)", "change!!) A SHAP monitoring plot is meant to display the", "Parameters ---------- ind : int Index of the feature to", "def truncate_text(text, max_len): if len(text) > max_len: return text[:int(max_len/2)-2] +", "shap_values[:,ind] xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys)) pvals = [] inc", "function is preliminary and subject to change!!) A SHAP monitoring", "in monitoring the model's performance. Parameters ---------- ind : int", "50 for i in range(inc, len(ys)-inc, inc): #stat, pval =", "ImportError: warnings.warn(\"matplotlib could not be loaded!\") pass from . 
import", "or pandas.DataFrame Matrix of feature values (# samples x #", "samples x # features) feature_names : list Names of the", "scipy.stats.ttest_ind(ys[:i], ys[i:]) pvals.append(pval) min_pval = np.min(pvals) min_pval_ind = np.argmin(pvals)*inc +", "loss over time can help in monitoring the model's performance.", "range(inc, len(ys)-inc, inc): #stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\") stat,", "\"\\nSHAP value\", size=13) pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) cb = pl.colorbar()", "if len(text) > max_len: return text[:int(max_len/2)-2] + \"...\" + text[-int(max_len/2)+1:]", "= 50 for i in range(inc, len(ys)-inc, inc): #stat, pval", "features, feature_names=None): \"\"\" Create a SHAP monitoring plot. (Note this", "be loaded!\") pass from . import labels from . import", "model's performance. Parameters ---------- ind : int Index of the", "features.values pl.figure(figsize=(10,3)) ys = shap_values[:,ind] xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))", "from . import labels from . import colors def truncate_text(text,", "matplotlib.pyplot as pl import matplotlib except ImportError: warnings.warn(\"matplotlib could not", "a model, so changes in a feature's impact on the", "import colors def truncate_text(text, max_len): if len(text) > max_len: return", "shap_values : numpy.array Matrix of SHAP values (# samples x", "SHAP values (# samples x # features) features : numpy.array", "max_len: return text[:int(max_len/2)-2] + \"...\" + text[-int(max_len/2)+1:] else: return text", "time. Often the shap_values given to this plot explain the", "= np.arange(len(ys))#np.linspace(0, 12*2, len(ys)) pvals = [] inc = 50", "\"\"\" if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if feature_names is None: feature_names = features.columns", "[] inc = 50 for i in range(inc, len(ys)-inc, inc):", "subject to change!!) A SHAP monitoring plot is meant to", "pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) cb = pl.colorbar() cb.outline.set_visible(False) bbox =", "the features (length # features) \"\"\" if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if feature_names", "warnings.warn(\"matplotlib could not be loaded!\") pass from . import labels", "shap_values given to this plot explain the loss of a", "import warnings try: import matplotlib.pyplot as pl import matplotlib except", "monitoring plot. (Note this function is preliminary and subject to", "bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7) * 20) cb.set_label(truncate_text(feature_names[ind], 30),", "changes in a feature's impact on the model's loss over", "model over time. 
Often the shap_values given to this plot", "min_pval = np.min(pvals) min_pval_ind = np.argmin(pvals)*inc + inc if min_pval", "= shap_values[:,ind] xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys)) pvals = []", "the shap_values given to this plot explain the loss of", "pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) cb = pl.colorbar() cb.outline.set_visible(False) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())", "stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:]) pvals.append(pval) min_pval = np.min(pvals) min_pval_ind", "c=features[:,ind], cmap=colors.red_blue) pl.xlabel(\"Sample index\") pl.ylabel(truncate_text(feature_names[ind], 30) + \"\\nSHAP value\", size=13)", "cb.outline.set_visible(False) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7) * 20) cb.set_label(truncate_text(feature_names[ind],", "plot explain the loss of a model, so changes in", "the model's performance. Parameters ---------- ind : int Index of", "ys[i:]) pvals.append(pval) min_pval = np.min(pvals) min_pval_ind = np.argmin(pvals)*inc + inc", "index\") pl.ylabel(truncate_text(feature_names[ind], 30) + \"\\nSHAP value\", size=13) pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False)", "is preliminary and subject to change!!) A SHAP monitoring plot", "# features) features : numpy.array or pandas.DataFrame Matrix of feature", "plot. shap_values : numpy.array Matrix of SHAP values (# samples", "pl.ylabel(truncate_text(feature_names[ind], 30) + \"\\nSHAP value\", size=13) pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False)", "---------- ind : int Index of the feature to plot.", ": list Names of the features (length # features) \"\"\"", "len(ys)) pvals = [] inc = 50 for i in", "to display the behavior of a model over time. Often", "Index of the feature to plot. shap_values : numpy.array Matrix", "return text def monitoring_plot(ind, shap_values, features, feature_names=None): \"\"\" Create a", "if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if feature_names is None: feature_names = features.columns features", "SHAP monitoring plot. (Note this function is preliminary and subject", ": numpy.array or pandas.DataFrame Matrix of feature values (# samples", "else: return text def monitoring_plot(ind, shap_values, features, feature_names=None): \"\"\" Create", "+ text[-int(max_len/2)+1:] else: return text def monitoring_plot(ind, shap_values, features, feature_names=None):", "to change!!) A SHAP monitoring plot is meant to display", "loss of a model, so changes in a feature's impact", "monitoring the model's performance. Parameters ---------- ind : int Index", "pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) cb = pl.colorbar() cb.outline.set_visible(False) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height", "features : numpy.array or pandas.DataFrame Matrix of feature values (#", "display the behavior of a model over time. Often the", "Matrix of feature values (# samples x # features) feature_names", ". import labels from . 
import colors def truncate_text(text, max_len):", "pl import matplotlib except ImportError: warnings.warn(\"matplotlib could not be loaded!\")", "values (# samples x # features) features : numpy.array or", "= scipy.stats.ttest_ind(ys[:i], ys[i:]) pvals.append(pval) min_pval = np.min(pvals) min_pval_ind = np.argmin(pvals)*inc", "np.min(pvals) min_pval_ind = np.argmin(pvals)*inc + inc if min_pval < 0.05", "pl.axvline(min_pval_ind, linestyle=\"dashed\", color=\"#666666\", alpha=0.2) pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue) pl.xlabel(\"Sample", "features) feature_names : list Names of the features (length #", "#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\") stat, pval = scipy.stats.ttest_ind(ys[:i],", "scipy import warnings try: import matplotlib.pyplot as pl import matplotlib", "import labels from . import colors def truncate_text(text, max_len): if", "features) \"\"\" if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if feature_names is None: feature_names =", "a model over time. Often the shap_values given to this", "= [] inc = 50 for i in range(inc, len(ys)-inc,", "text[-int(max_len/2)+1:] else: return text def monitoring_plot(ind, shap_values, features, feature_names=None): \"\"\"", "shap_values.shape[1]: pl.axvline(min_pval_ind, linestyle=\"dashed\", color=\"#666666\", alpha=0.2) pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)", "over time. Often the shap_values given to this plot explain", "explain the loss of a model, so changes in a", "list Names of the features (length # features) \"\"\" if", "in range(inc, len(ys)-inc, inc): #stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\")", "len(ys)-inc, inc): #stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\") stat, pval", "pl.xlabel(\"Sample index\") pl.ylabel(truncate_text(feature_names[ind], 30) + \"\\nSHAP value\", size=13) pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left')", "30) + \"\\nSHAP value\", size=13) pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('left') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) cb", "loaded!\") pass from . import labels from . import colors", "impact on the model's loss over time can help in", "warnings try: import matplotlib.pyplot as pl import matplotlib except ImportError:", ": int Index of the feature to plot. shap_values :", "0.05 / shap_values.shape[1]: pl.axvline(min_pval_ind, linestyle=\"dashed\", color=\"#666666\", alpha=0.2) pl.scatter(xs, ys, s=10,", "ys, s=10, c=features[:,ind], cmap=colors.red_blue) pl.xlabel(\"Sample index\") pl.ylabel(truncate_text(feature_names[ind], 30) + \"\\nSHAP", "pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue) pl.xlabel(\"Sample index\") pl.ylabel(truncate_text(feature_names[ind], 30) +", "np import scipy import warnings try: import matplotlib.pyplot as pl", "(# samples x # features) feature_names : list Names of", "A SHAP monitoring plot is meant to display the behavior", "can help in monitoring the model's performance. 
Parameters ---------- ind", "min_pval < 0.05 / shap_values.shape[1]: pl.axvline(min_pval_ind, linestyle=\"dashed\", color=\"#666666\", alpha=0.2) pl.scatter(xs,", "features) features : numpy.array or pandas.DataFrame Matrix of feature values", "< 0.05 / shap_values.shape[1]: pl.axvline(min_pval_ind, linestyle=\"dashed\", color=\"#666666\", alpha=0.2) pl.scatter(xs, ys,", "cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7) * 20) cb.set_label(truncate_text(feature_names[ind], 30), size=13) pl.show()", "ys = shap_values[:,ind] xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys)) pvals =", "plot. (Note this function is preliminary and subject to change!!)", "\"\"\" Create a SHAP monitoring plot. (Note this function is", "behavior of a model over time. Often the shap_values given", "import numpy as np import scipy import warnings try: import", "np.arange(len(ys))#np.linspace(0, 12*2, len(ys)) pvals = [] inc = 50 for", "numpy.array or pandas.DataFrame Matrix of feature values (# samples x", "so changes in a feature's impact on the model's loss", "of the feature to plot. shap_values : numpy.array Matrix of", "truncate_text(text, max_len): if len(text) > max_len: return text[:int(max_len/2)-2] + \"...\"", "pass from . import labels from . import colors def", ": numpy.array Matrix of SHAP values (# samples x #", "to plot. shap_values : numpy.array Matrix of SHAP values (#", "is None: feature_names = features.columns features = features.values pl.figure(figsize=(10,3)) ys", "if min_pval < 0.05 / shap_values.shape[1]: pl.axvline(min_pval_ind, linestyle=\"dashed\", color=\"#666666\", alpha=0.2)", "of a model, so changes in a feature's impact on", "features (length # features) \"\"\" if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if feature_names is", "= scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\") stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:]) pvals.append(pval)", "except ImportError: warnings.warn(\"matplotlib could not be loaded!\") pass from .", "= np.min(pvals) min_pval_ind = np.argmin(pvals)*inc + inc if min_pval <", "samples x # features) features : numpy.array or pandas.DataFrame Matrix", "over time can help in monitoring the model's performance. Parameters", "for i in range(inc, len(ys)-inc, inc): #stat, pval = scipy.stats.mannwhitneyu(v[:i],", "str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if feature_names is None: feature_names = features.columns features =", "this function is preliminary and subject to change!!) A SHAP", "= cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.7) * 20) cb.set_label(truncate_text(feature_names[ind], 30), size=13)", "+ \"...\" + text[-int(max_len/2)+1:] else: return text def monitoring_plot(ind, shap_values,", "a SHAP monitoring plot. (Note this function is preliminary and", "Names of the features (length # features) \"\"\" if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"):", "pvals.append(pval) min_pval = np.min(pvals) min_pval_ind = np.argmin(pvals)*inc + inc if", "of a model over time. 
Often the shap_values given to", "pl.figure(figsize=(10,3)) ys = shap_values[:,ind] xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys)) pvals", "feature values (# samples x # features) feature_names : list", "meant to display the behavior of a model over time.", "color=\"#666666\", alpha=0.2) pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue) pl.xlabel(\"Sample index\") pl.ylabel(truncate_text(feature_names[ind],", "pandas.DataFrame Matrix of feature values (# samples x # features)", "could not be loaded!\") pass from . import labels from", "Often the shap_values given to this plot explain the loss", "linestyle=\"dashed\", color=\"#666666\", alpha=0.2) pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue) pl.xlabel(\"Sample index\")", "/ shap_values.shape[1]: pl.axvline(min_pval_ind, linestyle=\"dashed\", color=\"#666666\", alpha=0.2) pl.scatter(xs, ys, s=10, c=features[:,ind],", "(length # features) \"\"\" if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"): if feature_names is None:", "(# samples x # features) features : numpy.array or pandas.DataFrame", "v[i:], alternative=\"two-sided\") stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:]) pvals.append(pval) min_pval =", "pval = scipy.stats.ttest_ind(ys[:i], ys[i:]) pvals.append(pval) min_pval = np.min(pvals) min_pval_ind =", "len(text) > max_len: return text[:int(max_len/2)-2] + \"...\" + text[-int(max_len/2)+1:] else:", "import matplotlib except ImportError: warnings.warn(\"matplotlib could not be loaded!\") pass", "text[:int(max_len/2)-2] + \"...\" + text[-int(max_len/2)+1:] else: return text def monitoring_plot(ind,", "(Note this function is preliminary and subject to change!!) A", "xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys)) pvals = [] inc =" ]
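# --- Illustrative usage sketch, not part of the recovered plotting module above ---
# monitoring_plot() expects one row of SHAP values per time-ordered sample plus the
# matching feature matrix. The synthetic arrays below are assumptions made only to
# show the call shape; in real use the SHAP values would come from an explainer run
# over production data. Running this requires matplotlib and executing the module
# inside its package so the relative imports resolve.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n_samples, n_features = 500, 4
    features = rng.normal(size=(n_samples, n_features))
    shap_values = rng.normal(scale=0.1, size=(n_samples, n_features))
    shap_values[250:, 0] += 0.5  # inject a drift in feature 0 so the change-point test is likely to flag it
    monitoring_plot(0, shap_values, features,
                    feature_names=["feature_%d" % i for i in range(n_features)])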
[ "end = path.rfind(separator, 0, len(path)-1) if start < end: return", "+-\", db) if view == 'list': print('[', srv, '->', db,", "keepasa def do_setMaster(self,args): \"Set master password\" if sys.stdin.isatty(): # jezeli", "path\" print(common.get_cdir()) # pozwala na decyzję czy chcemy wyświetlać warningi", "text, line, begidx, endidx): if not text: completions = self.directories[:]", "self.directories = [] self.file_server_database = [] self.file_server = [] self.do_cd('.')", "servers[srv][\"databases\"] for db in databases: print('| | +-', db) else:", "if values_num == 1: if values[0] == 'on': print('Warnings on')", "ParseArgsException as e: print(e) # ustawia masterpassword dla keepasa def", "[NO/yes/info]: \") if ans == \"yes\": #wykonaj callback for file", "- file.server.base if values == '': # wykonaj na wszystkich", "if isinstance(e1,KeePassError): raise KeePassError(\"Unable to use Keepass(\" + e1.value +", "do_cd(self, args): \"Move to directory\" if args == '': print(common.get_cdir())", "start < end: return (path[0:start+1] + '...' + path[end:]) else:", "'tree': print(\"| +-\", db) if view == 'list': print('[', srv,", "nie ma pliku, zly master itp. i zwrocic 1 wyjątek", "czegokolwiek print(\"aborted\") else: # jeżeli specjalizujemy na czym chcemy wykonać", "== 0: r_list = [] for l in list: r_list.append(l.replace('\"',", "print('+-', file) servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() for", "separator = '' if '\\\\' in path: separator = '\\\\'", "wyjątek def get_password(self, alias): keepass_path = common.keepass_path if self.master ==", "for file in files: print('+-',file) ans = input(\"Are you sure?", "(arg_counter == n and m == 0) or n ==", "databases: print('| | +-', db) else: #jeżeli nie zdecydujemy się", "e def connect_command_builder(self,connection, perm): try: command = connection[\"adress\"] + \"_\"", "p def do_exit(self, *args): return True def do_EOF(self, line): return", "link - file.server.base if values == '': # wykonaj na", "print('Warnings on') self.warn = True elif values[0] == 'off': print('Warnings", "tylko pliku to wykonaj na wszystkich serwerach, bazach które są", "as e: print(e) except KeyError as e: print(e, \"is not", "+ connection[\"user\"]+ \"_\" + \\ connection[\"passwd\"] + \"_\" + str(connection[\"sshport\"])", "wszystkich plikach files = ConfigManager().get_config_list() # pobierz listę plików konfiguracyjnych", "callback(val[0], val[1], val[2], *args) except ConfigManagerError as e: print(e) except", "+ \".yaml\").get_all() for srv in servers: print('| +-', srv) databases", "to wykonaj na wszystkich bazach na serwerze file = val[0]", "self.prompt_sign = '#>' elif module != '': self.prompt_sign = '['", "db in databases: print('| | +-', db) else: #jeżeli nie", "ma pliku, zly master itp. 
i zwrocic 1 wyjątek def", "file + \".yaml\").get_all() for srv in servers: if view ==", "if view == 'list': print('[', file, '->', srv, '->', db,", "\\ connection[\"passwd\"] + \"_\" + str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"])", "except ConfigManagerError as e: print(e) except KeyError as e: print(e,", "print(\"Exec on:\") for file in files: print('+-',file) ans = input(\"Are", "w lokalizacji def do_ls(self, args): \"List directory\" for name in", "= len(val) if params == 1: # jeżeli podano nazwę", "== 'on': print('Warnings on') self.warn = True elif values[0] ==", "len(list)) else: raise ParseArgsException(\"Incorrect number of arguments\") # wykonuje daną", "wykonaj na wszystkich bazach na serwerze file = val[0] try:", "- pozwala na przemieszczanie się po katalogach def do_cd(self, args):", "elif values[0] == 'off': print('Warnings off') self.warn = False else:", "print(\"+-\", db) if view == 'list': print('[', db, ']') callback(file,", "self.file_server = [] self.do_cd('.') configs = ConfigManager().get_config_list() for conf in", "view == 'tree': print(\"| | +-\", db) if view ==", "as e: raise e def connect_command_builder(self,connection, perm): try: command =", "= path.rfind(separator, 0, len(path)-1) if start < end: return (path[0:start+1]", "nim zapisane file = val[0] try: servers = ConfigManager(\"config/\" +", "if params == 1: # jeżeli podano nazwę tylko pliku", "re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string) arg_counter = len(list); if (arg_counter >= n and", "val[0] try: servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() srv", "się po katalogach def do_cd(self, args): \"Move to directory\" if", "if self.master == None: raise KeePassError(\"Master Password Not Set\") try:", "except KeePassError as e: raise e def connect_command_builder(self,connection, perm): try:", "# wyświetl na czym będziesz wykonywać print(\"Exec on:\") for file", "ParseArgsException(\"Incorrect number of arguments\") # wykonuje daną funkcję (callback) na", "arguments\") # wykonuje daną funkcję (callback) na wszystkich bazach def", "na czym będziesz wykonywać print(\"Exec on:\") for file in files:", "srv in servers: if view == 'tree': print(\"+-\", srv) databases", "self.warn == True: print('Status: on') else: print('Status: off') except ParseArgsException", "[] self.file_server_database = [] self.file_server = [] self.do_cd('.') configs =", "not exist\") elif params == 2: # jeżeli podano nazwę", "name)): self.directories.append(name) except FileNotFoundError as e: print(e) # wyświetla wszystkie", "| +-\", db) if view == 'list': print('[', file, '->',", "= connection[\"adress\"] + \"_\" + connection[\"user\"]+ \"_\" + \\ connection[\"passwd\"]", "= len(list); if (arg_counter >= n and arg_counter <= m)", "\".yaml\").get_all() srv = val[1] databases = servers[srv][\"databases\"] for db in", "getpass import getpass from kp import KeePassError, get_password from configmanager", "print(\"| +-\", db) if view == 'list': print('[', srv, '->',", "if view == 'tree': print(\"+-\", db) if view == 'list':", "zapisane file = val[0] try: servers = ConfigManager(\"config/\" + file", "*args): return True def do_EOF(self, line): return True def emptyline(self):", "+ \"_\" + str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"]) + \"_\"", "import sys import common from getpass import getpass from kp", "for file in files: print('+-', file) servers = ConfigManager(\"config/\" +", "wykonaj na wszystkich serwerach, bazach które są w nim zapisane", "jeżeli 
podano nazwę tylko pliku to wykonaj na wszystkich serwerach,", "return command except KeyError as e2: if isinstance(e1,KeePassError): raise KeePassError(\"Unable", "use Keepass(\" + e1.value + \") or Password\") else: raise", "return (path) # autouzupełnienia dla cmd polecenia cd def complete_cd(self,", "print('Incorrect argument.') else: if self.warn == True: print('Status: on') else:", "in databases: print('| | +-', db) else: #jeżeli nie zdecydujemy", "polecenie cd - pozwala na przemieszczanie się po katalogach def", "srv, '->', db, ']') callback(file, srv, db, *args) except ConfigManagerError", "'.' + srv) for db in ConfigManager('config/' + conf +", "\"Set master password\" if sys.stdin.isatty(): # jezeli jako shell p", "for srv in servers: print('| +-', srv) databases = servers[srv][\"databases\"]", "+ '.yaml').get_all(): self.file_server_database.append(conf + '.' + srv) self.file_server.append(conf + '.'", "file) try: servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() for", "as e: print(e) # ustawia masterpassword dla keepasa def do_setMaster(self,args):", "wszystkich bazach def exec_on_config(self, callback, args, values, view = ''):", "cd def complete_cd(self, text, line, begidx, endidx): if not text:", "not exist\") # zwraca skróconą ścieżkę do aktualnego katalogu -", "n=0, m=0): list = re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string) arg_counter = len(list); if", "input(\"Are you sure? [NO/yes/info]: \") if ans == \"yes\": #wykonaj", "import KeePassError, get_password from configmanager import ConfigManager, ConfigManagerError common.init() class", "wszystkich serwerach, bazach które są w nim zapisane file =", "'\\\\' else: separator = '/' start = path.find(separator) end =", "\"_\" + str(connection[\"remoteport\"]) + \"_\" + perm return command except", "'->', db, ']') callback(file, srv, db, *args) except ConfigManagerError as", "chcemy wykonać val = values.split('.') #rozdzielamy nazwę_pliku.serwera.bazy params = len(val)", "'' if '\\\\' in path: separator = '\\\\' else: separator", "KeePassError(\"Master Password Not Set\") try: return get_password(keepass_path, self.master, alias) except", "from configmanager import ConfigManager, ConfigManagerError common.init() class ParseArgsException(Exception): def __init__(self,", "+-', srv) databases = servers[srv][\"databases\"] for db in databases: print('|", "'tree': print('+-', file) try: servers = ConfigManager(\"config/\" + file +", "| +-', db) else: #jeżeli nie zdecydujemy się na wykonanie", "exist\") elif params == 3: # podano nazwę pliku, serwer", "servers: if view == 'tree': print(\"+-\", srv) databases = servers[srv][\"databases\"]", "print('+-', file) try: servers = ConfigManager(\"config/\" + file + \".yaml\").get_all()", "to use Keepass(\" + e1.value + \") or Password\") else:", "[f for f in self.directories if f.startswith(text)] return completions #", "srv in ConfigManager('config/' + conf + '.yaml').get_all(): self.file_server_database.append(conf + '.'", "in files: print('+-', file) servers = ConfigManager(\"config/\" + file +", "end: return (path[0:start+1] + '...' 
+ path[end:]) else: return (path)", "db in databases: if view == 'tree': print(\"| +-\", db)", "db in ConfigManager('config/' + conf + '.yaml').get(srv)['databases']: self.file_server_database.append(conf + '.'", "self.file_server.append(conf) for srv in ConfigManager('config/' + conf + '.yaml').get_all(): self.file_server_database.append(conf", "directory\" for name in os.listdir(common.get_cdir()): print(name) # podaje pełną ścieżkę", "self.warn = True elif values[0] == 'off': print('Warnings off') self.warn", "cmd.Cmd.__init__(self) self.master = None if module == '#': self.prompt_sign =", "None: raise KeePassError(\"Master Password Not Set\") try: return get_password(keepass_path, self.master,", "do_exit(self, *args): return True def do_EOF(self, line): return True def", "m == 0) or n == 0: r_list = []", "os.listdir(common.get_cdir()): print(name) # podaje pełną ścieżkę aktualnego katalogu def do_pwd(self,", "''): cmd.Cmd.__init__(self) self.master = None if module == '#': self.prompt_sign", "daną funkcję (callback) na wszystkich bazach def exec_on_config(self, callback, args,", "if args == '': print(common.get_cdir()) else: try: common.chdir(args) self.prompt =", "else: raise KeePassError(\"Invalid connection in yaml file\") raise KeePassError(e1) return", "conf + '.yaml').get_all(): self.file_server_database.append(conf + '.' + srv) self.file_server.append(conf +", "+ file + \".yaml\").get_all() srv = val[1] databases = servers[srv][\"databases\"]", "else: raise ParseArgsException(\"Incorrect number of arguments\") # wykonuje daną funkcję", "'tree': print(\"+-\", db) if view == 'list': print('[', db, ']')", "def precmd(self, line): if not sys.stdin.isatty(): print(line) return line def", "db, *args) except ConfigManagerError as e: print(e) except KeyError as", "cmd polecenia cd def complete_cd(self, text, line, begidx, endidx): if", "# jezeli jako shell p = getpass('Enter Master Password: ')", "in ConfigManager('config/' + conf + '.yaml').get_all(): self.file_server_database.append(conf + '.' +", "of arguments\") # wykonuje daną funkcję (callback) na wszystkich bazach", "chcemy wyświetlać warningi def do_warn(self, args): \"\"\"warn <on/off>\"\"\" try: (values,", "Set\") try: return get_password(keepass_path, self.master, alias) except KeePassError as e:", "zly master itp. 
i zwrocic 1 wyjątek def get_password(self, alias):", "print('[', srv, '->', db, ']') callback(file, srv, db, *args) except", "na wszystkich plikach files = ConfigManager().get_config_list() # pobierz listę plików", "bazach na serwerze file = val[0] try: servers = ConfigManager(\"config/\"", "\".yaml\").get_all() for srv in servers: if view == 'tree': print(\"+-\",", "if values == '': # wykonaj na wszystkich plikach files", "== 3: # podano nazwę pliku, serwer i nazwę bazy", "'->', srv, '->', db, ']') callback(file, srv, db, *args) except", "print(e) elif ans == \"info\": #podaj tylko informację na czym", "do_warn(self, args): \"\"\"warn <on/off>\"\"\" try: (values, values_num) = self.parse_args(args, 0,", "p = getpass('Enter Master Password: ') else: p = sys.stdin.readline().rstrip()", "# podaje pełną ścieżkę aktualnego katalogu def do_pwd(self, args): \"Print", "pomocnicza def get_shortpath(self): path = common.get_cdir() separator = '' if", "common from getpass import getpass from kp import KeePassError, get_password", "import cmd import sys import common from getpass import getpass", "else: completions = [f for f in self.directories if f.startswith(text)]", "view == 'tree': print('+-', file) try: servers = ConfigManager(\"config/\" +", "'...' + path[end:]) else: return (path) # autouzupełnienia dla cmd", "'': self.prompt_sign = '[' + module + ']>' else: self.prompt_sign", "= [] self.file_server = [] self.do_cd('.') configs = ConfigManager().get_config_list() for", "które są w nim zapisane file = val[0] try: servers", "== '': # wykonaj na wszystkich plikach files = ConfigManager().get_config_list()", "sys.stdin.isatty(): print(line) return line def postcmd(self, stop, line): if not", "def emptyline(self): return False # Musimy wyłapać wszystko co możliwe,", "\"Print path\" print(common.get_cdir()) # pozwala na decyzję czy chcemy wyświetlać", "connect_command_builder(self,connection, perm): try: command = connection[\"adress\"] + \"_\" + connection[\"user\"]+", "e: print(e) # wyświetla wszystkie pliki w lokalizacji def do_ls(self,", "'->' #defaults self.ruler = '-' #Completions self.directories = [] self.file_server_database", "plików konfiguracyjnych # wyświetl na czym będziesz wykonywać print(\"Exec on:\")", "= val[1] databases = servers[srv][\"databases\"] for db in databases: if", "return get_password(keepass_path, self.master, alias) except KeePassError as e: raise e", "in files: if view == 'tree': print('+-', file) try: servers", "pliku i serwer to wykonaj na wszystkich bazach na serwerze", "sys.stdin.isatty(): # jezeli jako shell p = getpass('Enter Master Password:", "precmd(self, line): if not sys.stdin.isatty(): print(line) return line def postcmd(self,", "+ file + \".yaml\").get_all() for srv in servers: print('| +-',", "= re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string) arg_counter = len(list); if (arg_counter >= n", "def exec_on_config(self, callback, args, values, view = ''): # link", "#rozdzielamy nazwę_pliku.serwera.bazy params = len(val) if params == 1: #", "def connect_command_builder(self,connection, perm): try: command = connection[\"adress\"] + \"_\" +", "2: # jeżeli podano nazwę pliku i serwer to wykonaj", "= self.get_shortpath() + ' ' + self.prompt_sign self.directories = []", "'')) return (r_list, len(list)) else: raise ParseArgsException(\"Incorrect number of arguments\")", "self.directories = [] for name in os.listdir(common.get_cdir()): if os.path.isdir(os.path.join(common.get_cdir(), name)):", "= '' if '\\\\' in path: separator = '\\\\' else:", "+ 
connection[\"user\"]+ \"_\" + \\ self.get_password(connection[\"keepass\"]) + \"_\" + str(connection[\"sshport\"])", "or n == 0: r_list = [] for l in", "pliku, serwer i nazwę bazy - wykonaj polecenie dokładnie na", "polecenie dokładnie na niej try: callback(val[0], val[1], val[2], *args) except", "stop def parse_args(self, string=\"\", n=0, m=0): list = re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string)", "exec_on_config(self, callback, args, values, view = ''): # link -", "print('[', file, '->', srv, '->', db, ']') callback(file, srv, db,", "ConfigManager('config/' + conf + '.yaml').get_all(): self.file_server_database.append(conf + '.' + srv)", "+ ']>' else: self.prompt_sign = '->' #defaults self.ruler = '-'", "serwerze file = val[0] try: servers = ConfigManager(\"config/\" + file", "completions # polecenie cd - pozwala na przemieszczanie się po", "# wyświetla wszystkie pliki w lokalizacji def do_ls(self, args): \"List", "alias): keepass_path = common.keepass_path if self.master == None: raise KeePassError(\"Master", "podano nazwę pliku, serwer i nazwę bazy - wykonaj polecenie", "po katalogach def do_cd(self, args): \"Move to directory\" if args", "connection[\"adress\"] + \"_\" + connection[\"user\"]+ \"_\" + \\ self.get_password(connection[\"keepass\"]) +", "wyświetla wszystkie pliki w lokalizacji def do_ls(self, args): \"List directory\"", "na wszystkich bazach def exec_on_config(self, callback, args, values, view =", "== '#': self.prompt_sign = '#>' elif module != '': self.prompt_sign", "<on/off>\"\"\" try: (values, values_num) = self.parse_args(args, 0, 1) if values_num", "= ''): # link - file.server.base if values == '':", "db) else: #jeżeli nie zdecydujemy się na wykonanie czegokolwiek print(\"aborted\")", "'#>' elif module != '': self.prompt_sign = '[' + module", "getpass('Enter Master Password: ') else: p = sys.stdin.readline().rstrip() self.master =", "(KeyError, KeePassError) as e1: try: command = connection[\"adress\"] + \"_\"", "[] for l in list: r_list.append(l.replace('\"', '')) return (r_list, len(list))", "wszystkie pliki w lokalizacji def do_ls(self, args): \"List directory\" for", "Password Not Set\") try: return get_password(keepass_path, self.master, alias) except KeePassError", "= val[0] try: servers = ConfigManager(\"config/\" + file + \".yaml\").get_all()", "jezeli jako shell p = getpass('Enter Master Password: ') else:", "name in os.listdir(common.get_cdir()): if os.path.isdir(os.path.join(common.get_cdir(), name)): self.directories.append(name) except FileNotFoundError as", "ConfigManagerError as e: print(e) elif ans == \"info\": #podaj tylko", "return completions # polecenie cd - pozwala na przemieszczanie się", "\"_\" + \\ connection[\"passwd\"] + \"_\" + str(connection[\"sshport\"]) + \"_\"", "params == 2: # jeżeli podano nazwę pliku i serwer", "args): \"Move to directory\" if args == '': print(common.get_cdir()) else:", "line): return True def emptyline(self): return False # Musimy wyłapać", "'tree': print(\"| +-\", srv) databases = servers[srv][\"databases\"] for db in", "nazwę tylko pliku to wykonaj na wszystkich serwerach, bazach które", "+ \\ self.get_password(connection[\"keepass\"]) + \"_\" + str(connection[\"sshport\"]) + \"_\" +", "= '[' + module + ']>' else: self.prompt_sign = '->'", "files: if view == 'tree': print('+-', file) try: servers =", "(values, values_num) = self.parse_args(args, 0, 1) if values_num == 1:", "if f.startswith(text)] return completions # polecenie cd - pozwala na", "<= m) or (arg_counter == n and m == 0)", "\"Move 
to directory\" if args == '': print(common.get_cdir()) else: try:", "off') except ParseArgsException as e: print(e) # ustawia masterpassword dla", "'\\\\' in path: separator = '\\\\' else: separator = '/'", "Keepass(\" + e1.value + \") or Password\") else: raise KeePassError(\"Invalid", "(arg_counter >= n and arg_counter <= m) or (arg_counter ==", "__init__(self, module = ''): cmd.Cmd.__init__(self) self.master = None if module", "e: print(e, \"is not exist\") # zwraca skróconą ścieżkę do", "args): \"Print path\" print(common.get_cdir()) # pozwala na decyzję czy chcemy", "n and m == 0) or n == 0: r_list", "print('| +-', srv) databases = servers[srv][\"databases\"] for db in databases:", "+ str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"]) + \"_\" + perm", "in path: separator = '\\\\' else: separator = '/' start", "self.file_server.append(conf + '.' + srv) for db in ConfigManager('config/' +", "print(e, \"is not exist\") # zwraca skróconą ścieżkę do aktualnego", "== 'list': print('[', db, ']') callback(file, srv, db, *args) except", "return False # Musimy wyłapać wszystko co możliwe, nie ma", "if '\\\\' in path: separator = '\\\\' else: separator =", "print(e) except KeyError as e: print(e, \"is not exist\") #", "na decyzję czy chcemy wyświetlać warningi def do_warn(self, args): \"\"\"warn", "connection[\"passwd\"] + \"_\" + str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"]) +", "= common.get_cdir() separator = '' if '\\\\' in path: separator", "= ConfigManager().get_config_list() for conf in configs: self.file_server_database.append(conf) self.file_server.append(conf) for srv", "srv) self.file_server.append(conf + '.' + srv) for db in ConfigManager('config/'", "servers[srv][\"databases\"] for db in databases: if view == 'tree': print(\"+-\",", "val = values.split('.') #rozdzielamy nazwę_pliku.serwera.bazy params = len(val) if params", "#Completions self.directories = [] self.file_server_database = [] self.file_server = []", "return True def do_EOF(self, line): return True def emptyline(self): return", "wykonywać print(\"Exec on:\") for file in files: print('+-',file) ans =", "databases = servers[srv][\"databases\"] for db in databases: print('| | +-',", "\".yaml\").get_all() for srv in servers: print('| +-', srv) databases =", "+ \"_\" + perm return command except KeyError as e2:", "+ srv) for db in ConfigManager('config/' + conf + '.yaml').get(srv)['databases']:", "'off': print('Warnings off') self.warn = False else: print('Incorrect argument.') else:", "print(e) except KeyError as e: print(e, \"is not exist\") elif", "na przemieszczanie się po katalogach def do_cd(self, args): \"Move to", "+ \".yaml\").get_all() srv = val[1] databases = servers[srv][\"databases\"] for db", "== 'tree': print(\"+-\", db) if view == 'list': print('[', db,", "e: print(e) # ustawia masterpassword dla keepasa def do_setMaster(self,args): \"Set", "0) or n == 0: r_list = [] for l", "from kp import KeePassError, get_password from configmanager import ConfigManager, ConfigManagerError", "= None if module == '#': self.prompt_sign = '#>' elif", "= [] self.file_server_database = [] self.file_server = [] self.do_cd('.') configs", "try: return get_password(keepass_path, self.master, alias) except KeePassError as e: raise", "= path.find(separator) end = path.rfind(separator, 0, len(path)-1) if start <", "*args) except ConfigManagerError as e: print(e) except KeyError as e:", "+ \"_\" + perm except (KeyError, KeePassError) as e1: try:", "self.msg = msg class ModuleCore(cmd.Cmd): 
def __init__(self, module = ''):", "p = sys.stdin.readline().rstrip() self.master = p def do_exit(self, *args): return", "on') self.warn = True elif values[0] == 'off': print('Warnings off')", "not exist\") elif params == 3: # podano nazwę pliku,", "endidx): if not text: completions = self.directories[:] else: completions =", "pełną ścieżkę aktualnego katalogu def do_pwd(self, args): \"Print path\" print(common.get_cdir())", "for name in os.listdir(common.get_cdir()): if os.path.isdir(os.path.join(common.get_cdir(), name)): self.directories.append(name) except FileNotFoundError", "except ParseArgsException as e: print(e) # ustawia masterpassword dla keepasa", "val[2], *args) except ConfigManagerError as e: print(e) except KeyError as", "(r_list, len(list)) else: raise ParseArgsException(\"Incorrect number of arguments\") # wykonuje", "except FileNotFoundError as e: print(e) # wyświetla wszystkie pliki w", "= [f for f in self.directories if f.startswith(text)] return completions", "if not sys.stdin.isatty(): print(line) return line def postcmd(self, stop, line):", "view == 'list': print('[', file, '->', srv, '->', db, ']')", "else: try: common.chdir(args) self.prompt = self.get_shortpath() + ' ' +", "else: print('Incorrect argument.') else: if self.warn == True: print('Status: on')", "KeyError as e: print(e, \"is not exist\") elif params ==", "'list': print('[', db, ']') callback(file, srv, db, *args) except ConfigManagerError", "masterpassword dla keepasa def do_setMaster(self,args): \"Set master password\" if sys.stdin.isatty():", "0, len(path)-1) if start < end: return (path[0:start+1] + '...'", "files: print('+-',file) ans = input(\"Are you sure? [NO/yes/info]: \") if", "print(common.get_cdir()) # pozwala na decyzję czy chcemy wyświetlać warningi def", "raise KeePassError(\"Master Password Not Set\") try: return get_password(keepass_path, self.master, alias)", "values_num) = self.parse_args(args, 0, 1) if values_num == 1: if", "plikach files = ConfigManager().get_config_list() # pobierz listę plików konfiguracyjnych #", "print(e, \"is not exist\") elif params == 3: # podano", "password\" if sys.stdin.isatty(): # jezeli jako shell p = getpass('Enter", "# zwraca skróconą ścieżkę do aktualnego katalogu - funkcja pomocnicza", "params = len(val) if params == 1: # jeżeli podano", "view == 'tree': print(\"+-\", srv) databases = servers[srv][\"databases\"] for db", "file.server.base if values == '': # wykonaj na wszystkich plikach", "\"is not exist\") elif params == 3: # podano nazwę", "alias) except KeePassError as e: raise e def connect_command_builder(self,connection, perm):", "string) arg_counter = len(list); if (arg_counter >= n and arg_counter", "for db in ConfigManager('config/' + conf + '.yaml').get(srv)['databases']: self.file_server_database.append(conf +", "podano nazwę pliku i serwer to wykonaj na wszystkich bazach", "module != '': self.prompt_sign = '[' + module + ']>'", "try: callback(val[0], val[1], val[2], *args) except ConfigManagerError as e: print(e)", "= [] for name in os.listdir(common.get_cdir()): if os.path.isdir(os.path.join(common.get_cdir(), name)): self.directories.append(name)", "pliku, zly master itp. 
i zwrocic 1 wyjątek def get_password(self,", "as e: print(e) elif ans == \"info\": #podaj tylko informację", "zostałby wykonany for file in files: print('+-', file) servers =", "for srv in servers: if view == 'tree': print(\"+-\", srv)", "print('Status: off') except ParseArgsException as e: print(e) # ustawia masterpassword", "listę plików konfiguracyjnych # wyświetl na czym będziesz wykonywać print(\"Exec", "and m == 0) or n == 0: r_list =", "perm except (KeyError, KeePassError) as e1: try: command = connection[\"adress\"]", "nazwę pliku, serwer i nazwę bazy - wykonaj polecenie dokładnie", "str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"]) + \"_\" + perm except", "as e: print(e, \"is not exist\") elif params == 3:", "= False else: print('Incorrect argument.') else: if self.warn == True:", "self.master == None: raise KeePassError(\"Master Password Not Set\") try: return", "' + self.prompt_sign self.directories = [] for name in os.listdir(common.get_cdir()):", "callback(file, srv, db, *args) except ConfigManagerError as e: print(e) elif", "serwer to wykonaj na wszystkich bazach na serwerze file =", "db in databases: if view == 'tree': print(\"| | +-\",", "ModuleCore(cmd.Cmd): def __init__(self, module = ''): cmd.Cmd.__init__(self) self.master = None", "']') callback(file, srv, db, *args) except ConfigManagerError as e: print(e)", "path: separator = '\\\\' else: separator = '/' start =", "to directory\" if args == '': print(common.get_cdir()) else: try: common.chdir(args)", "= [] for l in list: r_list.append(l.replace('\"', '')) return (r_list,", "callback for file in files: if view == 'tree': print('+-',", "FileNotFoundError as e: print(e) # wyświetla wszystkie pliki w lokalizacji", "if view == 'tree': print(\"| | +-\", db) if view", "db, *args) except ConfigManagerError as e: print(e) elif ans ==", "ConfigManager(\"config/\" + file + \".yaml\").get_all() for srv in servers: if", "str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"]) + \"_\" + perm return", "pozwala na decyzję czy chcemy wyświetlać warningi def do_warn(self, args):", "r_list = [] for l in list: r_list.append(l.replace('\"', '')) return", "try: (values, values_num) = self.parse_args(args, 0, 1) if values_num ==", "= [] self.do_cd('.') configs = ConfigManager().get_config_list() for conf in configs:", "# wykonuje daną funkcję (callback) na wszystkich bazach def exec_on_config(self,", "print(\"+-\", srv) databases = servers[srv][\"databases\"] for db in databases: if", "ConfigManager().get_config_list() for conf in configs: self.file_server_database.append(conf) self.file_server.append(conf) for srv in", "not sys.stdin.isatty(): print(\"\") return stop def parse_args(self, string=\"\", n=0, m=0):", "będziesz wykonywać print(\"Exec on:\") for file in files: print('+-',file) ans", "# podano nazwę pliku, serwer i nazwę bazy - wykonaj", "file in files: if view == 'tree': print('+-', file) try:", "isinstance(e1,KeePassError): raise KeePassError(\"Unable to use Keepass(\" + e1.value + \")", "def __init__(self, msg): self.msg = msg class ModuleCore(cmd.Cmd): def __init__(self,", "(path[0:start+1] + '...' + path[end:]) else: return (path) # autouzupełnienia", "informację na czym callback zostałby wykonany for file in files:", "print(common.get_cdir()) else: try: common.chdir(args) self.prompt = self.get_shortpath() + ' '", "print('+-',file) ans = input(\"Are you sure? 
[NO/yes/info]: \") if ans", "srv = val[1] databases = servers[srv][\"databases\"] for db in databases:", "complete_cd(self, text, line, begidx, endidx): if not text: completions =", "ustawia masterpassword dla keepasa def do_setMaster(self,args): \"Set master password\" if", "if not text: completions = self.directories[:] else: completions = [f", "srv) databases = servers[srv][\"databases\"] for db in databases: print('| |", "def postcmd(self, stop, line): if not sys.stdin.isatty(): print(\"\") return stop", "''): # link - file.server.base if values == '': #", "\"is not exist\") elif params == 2: # jeżeli podano", "values[0] == 'off': print('Warnings off') self.warn = False else: print('Incorrect", "ParseArgsException(Exception): def __init__(self, msg): self.msg = msg class ModuleCore(cmd.Cmd): def", "get_password(self, alias): keepass_path = common.keepass_path if self.master == None: raise", "# pobierz listę plików konfiguracyjnych # wyświetl na czym będziesz", "elif params == 2: # jeżeli podano nazwę pliku i", "srv, db, *args) except ConfigManagerError as e: print(e) elif ans", "do_setMaster(self,args): \"Set master password\" if sys.stdin.isatty(): # jezeli jako shell", "= common.keepass_path if self.master == None: raise KeePassError(\"Master Password Not", "file) servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() for srv", "if view == 'list': print('[', db, ']') callback(file, srv, db,", "for name in os.listdir(common.get_cdir()): print(name) # podaje pełną ścieżkę aktualnego", "czy chcemy wyświetlać warningi def do_warn(self, args): \"\"\"warn <on/off>\"\"\" try:", "r_list.append(l.replace('\"', '')) return (r_list, len(list)) else: raise ParseArgsException(\"Incorrect number of", "db) if view == 'list': print('[', file, '->', srv, '->',", "serwer i nazwę bazy - wykonaj polecenie dokładnie na niej", "path.rfind(separator, 0, len(path)-1) if start < end: return (path[0:start+1] +", "perm): try: command = connection[\"adress\"] + \"_\" + connection[\"user\"]+ \"_\"", "os import cmd import sys import common from getpass import", "czym chcemy wykonać val = values.split('.') #rozdzielamy nazwę_pliku.serwera.bazy params =", "+ \"_\" + connection[\"user\"]+ \"_\" + \\ self.get_password(connection[\"keepass\"]) + \"_\"", "to wykonaj na wszystkich serwerach, bazach które są w nim", "val[1], val[2], *args) except ConfigManagerError as e: print(e) except KeyError", "- funkcja pomocnicza def get_shortpath(self): path = common.get_cdir() separator =", "do_pwd(self, args): \"Print path\" print(common.get_cdir()) # pozwala na decyzję czy", "3: # podano nazwę pliku, serwer i nazwę bazy -", "def do_EOF(self, line): return True def emptyline(self): return False #", "raise KeePassError(\"Invalid connection in yaml file\") raise KeePassError(e1) return command", "+ conf + '.yaml').get(srv)['databases']: self.file_server_database.append(conf + '.' 
+ srv +", "not text: completions = self.directories[:] else: completions = [f for", "== 'list': print('[', file, '->', srv, '->', db, ']') callback(file,", "in os.listdir(common.get_cdir()): print(name) # podaje pełną ścieżkę aktualnego katalogu def", "== 'tree': print(\"+-\", srv) databases = servers[srv][\"databases\"] for db in", "self.parse_args(args, 0, 1) if values_num == 1: if values[0] ==", "elif ans == \"info\": #podaj tylko informację na czym callback", "sys import common from getpass import getpass from kp import", "ConfigManager(\"config/\" + file + \".yaml\").get_all() srv = val[1] databases =", "[] self.do_cd('.') configs = ConfigManager().get_config_list() for conf in configs: self.file_server_database.append(conf)", "= input(\"Are you sure? [NO/yes/info]: \") if ans == \"yes\":", "module + ']>' else: self.prompt_sign = '->' #defaults self.ruler =", "begidx, endidx): if not text: completions = self.directories[:] else: completions", "= True elif values[0] == 'off': print('Warnings off') self.warn =", "path[end:]) else: return (path) # autouzupełnienia dla cmd polecenia cd", "if start < end: return (path[0:start+1] + '...' + path[end:])", "servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() srv = val[1]", "if not sys.stdin.isatty(): print(\"\") return stop def parse_args(self, string=\"\", n=0,", "wyświetl na czym będziesz wykonywać print(\"Exec on:\") for file in", "jeżeli specjalizujemy na czym chcemy wykonać val = values.split('.') #rozdzielamy", "== 'list': print('[', srv, '->', db, ']') callback(file, srv, db,", "jako shell p = getpass('Enter Master Password: ') else: p", "command = connection[\"adress\"] + \"_\" + connection[\"user\"]+ \"_\" + \\", "ans == \"info\": #podaj tylko informację na czym callback zostałby", "arg_counter = len(list); if (arg_counter >= n and arg_counter <=", "callback zostałby wykonany for file in files: print('+-', file) servers", "wykonaj na wszystkich plikach files = ConfigManager().get_config_list() # pobierz listę", "+ srv) self.file_server.append(conf + '.' + srv) for db in", "\\ self.get_password(connection[\"keepass\"]) + \"_\" + str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"])", ">= n and arg_counter <= m) or (arg_counter == n", "+ path[end:]) else: return (path) # autouzupełnienia dla cmd polecenia", "as e: print(e) # wyświetla wszystkie pliki w lokalizacji def", "for file in files: if view == 'tree': print('+-', file)", "callback(file, srv, db, *args) except ConfigManagerError as e: print(e) except", "params == 3: # podano nazwę pliku, serwer i nazwę", "sys.stdin.readline().rstrip() self.master = p def do_exit(self, *args): return True def", "None if module == '#': self.prompt_sign = '#>' elif module", "+ '.yaml').get(srv)['databases']: self.file_server_database.append(conf + '.' + srv + '.' +", "m=0): list = re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string) arg_counter = len(list); if (arg_counter", "możliwe, nie ma pliku, zly master itp. i zwrocic 1", "print(\"| | +-\", db) if view == 'list': print('[', file,", "+-', db) else: #jeżeli nie zdecydujemy się na wykonanie czegokolwiek", "in files: print('+-',file) ans = input(\"Are you sure? 
[NO/yes/info]: \")", "self.directories[:] else: completions = [f for f in self.directories if", "Musimy wyłapać wszystko co możliwe, nie ma pliku, zly master", "czym będziesz wykonywać print(\"Exec on:\") for file in files: print('+-',file)", "args): \"\"\"warn <on/off>\"\"\" try: (values, values_num) = self.parse_args(args, 0, 1)", "in list: r_list.append(l.replace('\"', '')) return (r_list, len(list)) else: raise ParseArgsException(\"Incorrect", "in servers: print('| +-', srv) databases = servers[srv][\"databases\"] for db", "for db in databases: if view == 'tree': print(\"| |", "KeyError as e: print(e, \"is not exist\") # zwraca skróconą", "view == 'tree': print(\"| +-\", srv) databases = servers[srv][\"databases\"] for", "sys.stdin.isatty(): print(\"\") return stop def parse_args(self, string=\"\", n=0, m=0): list", "line): if not sys.stdin.isatty(): print(line) return line def postcmd(self, stop,", "+ '.' + srv + '.' + db) def precmd(self,", "for conf in configs: self.file_server_database.append(conf) self.file_server.append(conf) for srv in ConfigManager('config/'", "[] for name in os.listdir(common.get_cdir()): if os.path.isdir(os.path.join(common.get_cdir(), name)): self.directories.append(name) except", "self.master, alias) except KeePassError as e: raise e def connect_command_builder(self,connection,", "as e2: if isinstance(e1,KeePassError): raise KeePassError(\"Unable to use Keepass(\" +", "'list': print('[', file, '->', srv, '->', db, ']') callback(file, srv,", "if (arg_counter >= n and arg_counter <= m) or (arg_counter", "== 'tree': print(\"| +-\", srv) databases = servers[srv][\"databases\"] for db", "nie zdecydujemy się na wykonanie czegokolwiek print(\"aborted\") else: # jeżeli", "KeePassError(\"Unable to use Keepass(\" + e1.value + \") or Password\")", "= ConfigManager(\"config/\" + file + \".yaml\").get_all() for srv in servers:", "or Password\") else: raise KeePassError(\"Invalid connection in yaml file\") raise", "else: if self.warn == True: print('Status: on') else: print('Status: off')", "!= '': self.prompt_sign = '[' + module + ']>' else:", "str(connection[\"remoteport\"]) + \"_\" + perm except (KeyError, KeePassError) as e1:", "na wszystkich bazach na serwerze file = val[0] try: servers", "itp. i zwrocic 1 wyjątek def get_password(self, alias): keepass_path =", "dokładnie na niej try: callback(val[0], val[1], val[2], *args) except ConfigManagerError", "e1: try: command = connection[\"adress\"] + \"_\" + connection[\"user\"]+ \"_\"", "'[' + module + ']>' else: self.prompt_sign = '->' #defaults", "else: #jeżeli nie zdecydujemy się na wykonanie czegokolwiek print(\"aborted\") else:", "module == '#': self.prompt_sign = '#>' elif module != '':", "len(path)-1) if start < end: return (path[0:start+1] + '...' +", "do_EOF(self, line): return True def emptyline(self): return False # Musimy", "\"_\" + connection[\"user\"]+ \"_\" + \\ connection[\"passwd\"] + \"_\" +", "#defaults self.ruler = '-' #Completions self.directories = [] self.file_server_database =", "number of arguments\") # wykonuje daną funkcję (callback) na wszystkich", "as e: print(e, \"is not exist\") # zwraca skróconą ścieżkę", "kp import KeePassError, get_password from configmanager import ConfigManager, ConfigManagerError common.init()", "== \"info\": #podaj tylko informację na czym callback zostałby wykonany", "print('| | +-', db) else: #jeżeli nie zdecydujemy się na", "master itp. 
i zwrocic 1 wyjątek def get_password(self, alias): keepass_path", "zwrocic 1 wyjątek def get_password(self, alias): keepass_path = common.keepass_path if", "e: raise e def connect_command_builder(self,connection, perm): try: command = connection[\"adress\"]", "i serwer to wykonaj na wszystkich bazach na serwerze file", "KeyError as e2: if isinstance(e1,KeePassError): raise KeePassError(\"Unable to use Keepass(\"", "configs = ConfigManager().get_config_list() for conf in configs: self.file_server_database.append(conf) self.file_server.append(conf) for", "\") if ans == \"yes\": #wykonaj callback for file in", "connection[\"user\"]+ \"_\" + \\ connection[\"passwd\"] + \"_\" + str(connection[\"sshport\"]) +", "'/' start = path.find(separator) end = path.rfind(separator, 0, len(path)-1) if", "\"_\" + str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"]) + \"_\" +", "pliki w lokalizacji def do_ls(self, args): \"List directory\" for name", "path = common.get_cdir() separator = '' if '\\\\' in path:", "= connection[\"adress\"] + \"_\" + connection[\"user\"]+ \"_\" + \\ self.get_password(connection[\"keepass\"])", "self.do_cd('.') configs = ConfigManager().get_config_list() for conf in configs: self.file_server_database.append(conf) self.file_server.append(conf)", "try: servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() for srv", "bazach def exec_on_config(self, callback, args, values, view = ''): #", "try: servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() srv =", "view == 'list': print('[', db, ']') callback(file, srv, db, *args)", "file + \".yaml\").get_all() for srv in servers: print('| +-', srv)", "i zwrocic 1 wyjątek def get_password(self, alias): keepass_path = common.keepass_path", "view == 'list': print('[', srv, '->', db, ']') callback(file, srv,", "print(e, \"is not exist\") elif params == 2: # jeżeli", "# jeżeli specjalizujemy na czym chcemy wykonać val = values.split('.')", "0, 1) if values_num == 1: if values[0] == 'on':", "lokalizacji def do_ls(self, args): \"List directory\" for name in os.listdir(common.get_cdir()):", "= '#>' elif module != '': self.prompt_sign = '[' +", "file in files: print('+-',file) ans = input(\"Are you sure? [NO/yes/info]:", "\"_\" + \\ self.get_password(connection[\"keepass\"]) + \"_\" + str(connection[\"sshport\"]) + \"_\"", "common.init() class ParseArgsException(Exception): def __init__(self, msg): self.msg = msg class", "re import os import cmd import sys import common from", "else: p = sys.stdin.readline().rstrip() self.master = p def do_exit(self, *args):", "zdecydujemy się na wykonanie czegokolwiek print(\"aborted\") else: # jeżeli specjalizujemy", "+ \\ connection[\"passwd\"] + \"_\" + str(connection[\"sshport\"]) + \"_\" +", "'.yaml').get_all(): self.file_server_database.append(conf + '.' + srv) self.file_server.append(conf + '.' 
+", "else: separator = '/' start = path.find(separator) end = path.rfind(separator,", "e1.value + \") or Password\") else: raise KeePassError(\"Invalid connection in", "srv) databases = servers[srv][\"databases\"] for db in databases: if view", "off') self.warn = False else: print('Incorrect argument.') else: if self.warn", "except KeyError as e: print(e, \"is not exist\") elif params", "separator = '\\\\' else: separator = '/' start = path.find(separator)", "file + \".yaml\").get_all() srv = val[1] databases = servers[srv][\"databases\"] for", "os.path.isdir(os.path.join(common.get_cdir(), name)): self.directories.append(name) except FileNotFoundError as e: print(e) # wyświetla", "import re import os import cmd import sys import common", "str(connection[\"remoteport\"]) + \"_\" + perm return command except KeyError as", "= ConfigManager(\"config/\" + file + \".yaml\").get_all() srv = val[1] databases", "ścieżkę aktualnego katalogu def do_pwd(self, args): \"Print path\" print(common.get_cdir()) #", "== 2: # jeżeli podano nazwę pliku i serwer to", "' ' + self.prompt_sign self.directories = [] for name in", "self.directories if f.startswith(text)] return completions # polecenie cd - pozwala", "ans = input(\"Are you sure? [NO/yes/info]: \") if ans ==", "wykonaj polecenie dokładnie na niej try: callback(val[0], val[1], val[2], *args)", "path.find(separator) end = path.rfind(separator, 0, len(path)-1) if start < end:", "# Musimy wyłapać wszystko co możliwe, nie ma pliku, zly", "'': print(common.get_cdir()) else: try: common.chdir(args) self.prompt = self.get_shortpath() + '", "\"_\" + perm return command except KeyError as e2: if", "+ perm return command except KeyError as e2: if isinstance(e1,KeePassError):", "return stop def parse_args(self, string=\"\", n=0, m=0): list = re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+',", "\"yes\": #wykonaj callback for file in files: if view ==", "katalogach def do_cd(self, args): \"Move to directory\" if args ==", "'.' + srv) self.file_server.append(conf + '.' + srv) for db", "print('Status: on') else: print('Status: off') except ParseArgsException as e: print(e)", "# autouzupełnienia dla cmd polecenia cd def complete_cd(self, text, line,", "argument.') else: if self.warn == True: print('Status: on') else: print('Status:", "+ self.prompt_sign self.directories = [] for name in os.listdir(common.get_cdir()): if", "== None: raise KeePassError(\"Master Password Not Set\") try: return get_password(keepass_path,", "self.prompt_sign = '[' + module + ']>' else: self.prompt_sign =", "self.prompt_sign = '->' #defaults self.ruler = '-' #Completions self.directories =", "= self.directories[:] else: completions = [f for f in self.directories", "raise ParseArgsException(\"Incorrect number of arguments\") # wykonuje daną funkcję (callback)", "+ '.' + srv) self.file_server.append(conf + '.' 
+ srv) for", "czym callback zostałby wykonany for file in files: print('+-', file)", "na niej try: callback(val[0], val[1], val[2], *args) except ConfigManagerError as", "na czym callback zostałby wykonany for file in files: print('+-',", "databases = servers[srv][\"databases\"] for db in databases: if view ==", "completions = [f for f in self.directories if f.startswith(text)] return", "else: print('Status: off') except ParseArgsException as e: print(e) # ustawia", "True def do_EOF(self, line): return True def emptyline(self): return False", "nazwę_pliku.serwera.bazy params = len(val) if params == 1: # jeżeli", "= ''): cmd.Cmd.__init__(self) self.master = None if module == '#':", "val[1] databases = servers[srv][\"databases\"] for db in databases: if view", "__init__(self, msg): self.msg = msg class ModuleCore(cmd.Cmd): def __init__(self, module", "def do_pwd(self, args): \"Print path\" print(common.get_cdir()) # pozwala na decyzję", "Not Set\") try: return get_password(keepass_path, self.master, alias) except KeePassError as", "print(line) return line def postcmd(self, stop, line): if not sys.stdin.isatty():", "(callback) na wszystkich bazach def exec_on_config(self, callback, args, values, view", "wykonuje daną funkcję (callback) na wszystkich bazach def exec_on_config(self, callback,", "print(\"| +-\", srv) databases = servers[srv][\"databases\"] for db in databases:", "1: if values[0] == 'on': print('Warnings on') self.warn = True", "if view == 'list': print('[', srv, '->', db, ']') callback(file,", "warningi def do_warn(self, args): \"\"\"warn <on/off>\"\"\" try: (values, values_num) =", "common.keepass_path if self.master == None: raise KeePassError(\"Master Password Not Set\")", "servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() for srv in", "servers: print('| +-', srv) databases = servers[srv][\"databases\"] for db in", "na wszystkich serwerach, bazach które są w nim zapisane file", "< end: return (path[0:start+1] + '...' + path[end:]) else: return", "for db in databases: if view == 'tree': print(\"| +-\",", "+ '.' + srv) for db in ConfigManager('config/' + conf", "== '': print(common.get_cdir()) else: try: common.chdir(args) self.prompt = self.get_shortpath() +", "decyzję czy chcemy wyświetlać warningi def do_warn(self, args): \"\"\"warn <on/off>\"\"\"", "view = ''): # link - file.server.base if values ==", "if view == 'tree': print(\"+-\", srv) databases = servers[srv][\"databases\"] for", "katalogu - funkcja pomocnicza def get_shortpath(self): path = common.get_cdir() separator", "db) if view == 'list': print('[', srv, '->', db, ']')", "os.listdir(common.get_cdir()): if os.path.isdir(os.path.join(common.get_cdir(), name)): self.directories.append(name) except FileNotFoundError as e: print(e)", "'#': self.prompt_sign = '#>' elif module != '': self.prompt_sign =", "servers: if view == 'tree': print(\"| +-\", srv) databases =", "for db in databases: print('| | +-', db) else: #jeżeli", "as e: print(e, \"is not exist\") elif params == 2:", "wyłapać wszystko co możliwe, nie ma pliku, zly master itp.", "+ conf + '.yaml').get_all(): self.file_server_database.append(conf + '.' 
+ srv) self.file_server.append(conf", "self.prompt_sign self.directories = [] for name in os.listdir(common.get_cdir()): if os.path.isdir(os.path.join(common.get_cdir(),", "self.directories.append(name) except FileNotFoundError as e: print(e) # wyświetla wszystkie pliki", "def do_setMaster(self,args): \"Set master password\" if sys.stdin.isatty(): # jezeli jako", "on:\") for file in files: print('+-',file) ans = input(\"Are you", "= servers[srv][\"databases\"] for db in databases: if view == 'tree':", "for f in self.directories if f.startswith(text)] return completions # polecenie", "print('[', db, ']') callback(file, srv, db, *args) except ConfigManagerError as", "if sys.stdin.isatty(): # jezeli jako shell p = getpass('Enter Master", "shell p = getpass('Enter Master Password: ') else: p =", "values[0] == 'on': print('Warnings on') self.warn = True elif values[0]", "return line def postcmd(self, stop, line): if not sys.stdin.isatty(): print(\"\")", "pliku to wykonaj na wszystkich serwerach, bazach które są w", "self.get_shortpath() + ' ' + self.prompt_sign self.directories = [] for", "specjalizujemy na czym chcemy wykonać val = values.split('.') #rozdzielamy nazwę_pliku.serwera.bazy", "db in databases: if view == 'tree': print(\"+-\", db) if", "connection[\"adress\"] + \"_\" + connection[\"user\"]+ \"_\" + \\ connection[\"passwd\"] +", "in ConfigManager('config/' + conf + '.yaml').get(srv)['databases']: self.file_server_database.append(conf + '.' +", "i nazwę bazy - wykonaj polecenie dokładnie na niej try:", "+ \"_\" + connection[\"user\"]+ \"_\" + \\ connection[\"passwd\"] + \"_\"", "get_password from configmanager import ConfigManager, ConfigManagerError common.init() class ParseArgsException(Exception): def", "elif module != '': self.prompt_sign = '[' + module +", "= sys.stdin.readline().rstrip() self.master = p def do_exit(self, *args): return True", "exist\") # zwraca skróconą ścieżkę do aktualnego katalogu - funkcja", "try: command = connection[\"adress\"] + \"_\" + connection[\"user\"]+ \"_\" +", "not sys.stdin.isatty(): print(line) return line def postcmd(self, stop, line): if", "import getpass from kp import KeePassError, get_password from configmanager import", "return True def emptyline(self): return False # Musimy wyłapać wszystko", "aktualnego katalogu - funkcja pomocnicza def get_shortpath(self): path = common.get_cdir()", "databases: if view == 'tree': print(\"| | +-\", db) if", "db) if view == 'list': print('[', db, ']') callback(file, srv,", "# link - file.server.base if values == '': # wykonaj", "databases: if view == 'tree': print(\"| +-\", db) if view", "print(name) # podaje pełną ścieżkę aktualnego katalogu def do_pwd(self, args):", "def do_ls(self, args): \"List directory\" for name in os.listdir(common.get_cdir()): print(name)", "+ str(connection[\"remoteport\"]) + \"_\" + perm except (KeyError, KeePassError) as", "\".yaml\").get_all() for srv in servers: if view == 'tree': print(\"|", "cmd import sys import common from getpass import getpass from", "skróconą ścieżkę do aktualnego katalogu - funkcja pomocnicza def get_shortpath(self):", "def get_password(self, alias): keepass_path = common.keepass_path if self.master == None:", "string=\"\", n=0, m=0): list = re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string) arg_counter = len(list);", "False # Musimy wyłapać wszystko co możliwe, nie ma pliku,", "completions = self.directories[:] else: completions = [f for f in", "# jeżeli podano nazwę tylko pliku to wykonaj na wszystkich", "*args) except 
ConfigManagerError as e: print(e) elif ans == \"info\":", "pozwala na przemieszczanie się po katalogach def do_cd(self, args): \"Move", "== n and m == 0) or n == 0:", "conf in configs: self.file_server_database.append(conf) self.file_server.append(conf) for srv in ConfigManager('config/' +", "# jeżeli podano nazwę pliku i serwer to wykonaj na", "args == '': print(common.get_cdir()) else: try: common.chdir(args) self.prompt = self.get_shortpath()", "def __init__(self, module = ''): cmd.Cmd.__init__(self) self.master = None if", "else: self.prompt_sign = '->' #defaults self.ruler = '-' #Completions self.directories", "= msg class ModuleCore(cmd.Cmd): def __init__(self, module = ''): cmd.Cmd.__init__(self)", "return (r_list, len(list)) else: raise ParseArgsException(\"Incorrect number of arguments\") #", "'tree': print(\"+-\", srv) databases = servers[srv][\"databases\"] for db in databases:", "do_ls(self, args): \"List directory\" for name in os.listdir(common.get_cdir()): print(name) #", "= getpass('Enter Master Password: ') else: p = sys.stdin.readline().rstrip() self.master", "= '\\\\' else: separator = '/' start = path.find(separator) end", "m) or (arg_counter == n and m == 0) or", "perm return command except KeyError as e2: if isinstance(e1,KeePassError): raise", "self.file_server_database.append(conf + '.' + srv + '.' + db) def", "się na wykonanie czegokolwiek print(\"aborted\") else: # jeżeli specjalizujemy na", "import common from getpass import getpass from kp import KeePassError,", "name in os.listdir(common.get_cdir()): print(name) # podaje pełną ścieżkę aktualnego katalogu", "e: print(e, \"is not exist\") elif params == 2: #", "False else: print('Incorrect argument.') else: if self.warn == True: print('Status:", "') else: p = sys.stdin.readline().rstrip() self.master = p def do_exit(self,", "tylko informację na czym callback zostałby wykonany for file in", "values.split('.') #rozdzielamy nazwę_pliku.serwera.bazy params = len(val) if params == 1:", "class ModuleCore(cmd.Cmd): def __init__(self, module = ''): cmd.Cmd.__init__(self) self.master =", "'list': print('[', srv, '->', db, ']') callback(file, srv, db, *args)", "\"_\" + connection[\"user\"]+ \"_\" + \\ self.get_password(connection[\"keepass\"]) + \"_\" +", "= '->' #defaults self.ruler = '-' #Completions self.directories = []", "\"_\" + str(connection[\"remoteport\"]) + \"_\" + perm except (KeyError, KeePassError)", "if ans == \"yes\": #wykonaj callback for file in files:", "def parse_args(self, string=\"\", n=0, m=0): list = re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string) arg_counter", "\"info\": #podaj tylko informację na czym callback zostałby wykonany for", "wykonać val = values.split('.') #rozdzielamy nazwę_pliku.serwera.bazy params = len(val) if", "class ParseArgsException(Exception): def __init__(self, msg): self.msg = msg class ModuleCore(cmd.Cmd):", "+ srv + '.' 
+ db) def precmd(self, line): if", "+ file + \".yaml\").get_all() for srv in servers: if view", "in servers: if view == 'tree': print(\"| +-\", srv) databases", "katalogu def do_pwd(self, args): \"Print path\" print(common.get_cdir()) # pozwala na", "\"List directory\" for name in os.listdir(common.get_cdir()): print(name) # podaje pełną", "#wykonaj callback for file in files: if view == 'tree':", "w nim zapisane file = val[0] try: servers = ConfigManager(\"config/\"", "# ustawia masterpassword dla keepasa def do_setMaster(self,args): \"Set master password\"", "if os.path.isdir(os.path.join(common.get_cdir(), name)): self.directories.append(name) except FileNotFoundError as e: print(e) #", "raise KeePassError(\"Unable to use Keepass(\" + e1.value + \") or", "jeżeli podano nazwę pliku i serwer to wykonaj na wszystkich", "list: r_list.append(l.replace('\"', '')) return (r_list, len(list)) else: raise ParseArgsException(\"Incorrect number", "emptyline(self): return False # Musimy wyłapać wszystko co możliwe, nie", "exist\") elif params == 2: # jeżeli podano nazwę pliku", "== 'off': print('Warnings off') self.warn = False else: print('Incorrect argument.')", "+ module + ']>' else: self.prompt_sign = '->' #defaults self.ruler", "autouzupełnienia dla cmd polecenia cd def complete_cd(self, text, line, begidx,", "import os import cmd import sys import common from getpass", "'-' #Completions self.directories = [] self.file_server_database = [] self.file_server =", "= '-' #Completions self.directories = [] self.file_server_database = [] self.file_server", "file, '->', srv, '->', db, ']') callback(file, srv, db, *args)", "print(\"aborted\") else: # jeżeli specjalizujemy na czym chcemy wykonać val", "\"_\" + perm except (KeyError, KeePassError) as e1: try: command", "servers[srv][\"databases\"] for db in databases: if view == 'tree': print(\"|", "def get_shortpath(self): path = common.get_cdir() separator = '' if '\\\\'", "in configs: self.file_server_database.append(conf) self.file_server.append(conf) for srv in ConfigManager('config/' + conf", "funkcję (callback) na wszystkich bazach def exec_on_config(self, callback, args, values,", "ConfigManagerError common.init() class ParseArgsException(Exception): def __init__(self, msg): self.msg = msg", "- wykonaj polecenie dokładnie na niej try: callback(val[0], val[1], val[2],", "'.yaml').get(srv)['databases']: self.file_server_database.append(conf + '.' + srv + '.' + db)", "in databases: if view == 'tree': print(\"| +-\", db) if", "you sure? [NO/yes/info]: \") if ans == \"yes\": #wykonaj callback", "+ ' ' + self.prompt_sign self.directories = [] for name", "self.master = None if module == '#': self.prompt_sign = '#>'", "print(e) # wyświetla wszystkie pliki w lokalizacji def do_ls(self, args):", "connection[\"user\"]+ \"_\" + \\ self.get_password(connection[\"keepass\"]) + \"_\" + str(connection[\"sshport\"]) +", "val[0] try: servers = ConfigManager(\"config/\" + file + \".yaml\").get_all() for", "dla cmd polecenia cd def complete_cd(self, text, line, begidx, endidx):", "separator = '/' start = path.find(separator) end = path.rfind(separator, 0,", "wyświetlać warningi def do_warn(self, args): \"\"\"warn <on/off>\"\"\" try: (values, values_num)", "for db in databases: if view == 'tree': print(\"+-\", db)", "0: r_list = [] for l in list: r_list.append(l.replace('\"', ''))", "+ '...' 
+ path[end:]) else: return (path) # autouzupełnienia dla", "print(e) # ustawia masterpassword dla keepasa def do_setMaster(self,args): \"Set master", "= self.parse_args(args, 0, 1) if values_num == 1: if values[0]", "line): if not sys.stdin.isatty(): print(\"\") return stop def parse_args(self, string=\"\",", "in databases: if view == 'tree': print(\"+-\", db) if view", "na wykonanie czegokolwiek print(\"aborted\") else: # jeżeli specjalizujemy na czym", "ConfigManager, ConfigManagerError common.init() class ParseArgsException(Exception): def __init__(self, msg): self.msg =", "zwraca skróconą ścieżkę do aktualnego katalogu - funkcja pomocnicza def", "wszystko co możliwe, nie ma pliku, zly master itp. i", "e: print(e) elif ans == \"info\": #podaj tylko informację na", "== True: print('Status: on') else: print('Status: off') except ParseArgsException as", "and arg_counter <= m) or (arg_counter == n and m", "files = ConfigManager().get_config_list() # pobierz listę plików konfiguracyjnych # wyświetl", "wszystkich bazach na serwerze file = val[0] try: servers =", "def do_exit(self, *args): return True def do_EOF(self, line): return True", "get_shortpath(self): path = common.get_cdir() separator = '' if '\\\\' in", "self.ruler = '-' #Completions self.directories = [] self.file_server_database = []", "directory\" if args == '': print(common.get_cdir()) else: try: common.chdir(args) self.prompt", "self.warn = False else: print('Incorrect argument.') else: if self.warn ==", "= servers[srv][\"databases\"] for db in databases: print('| | +-', db)", "True elif values[0] == 'off': print('Warnings off') self.warn = False", "'.' + srv + '.' + db) def precmd(self, line):", "wykonanie czegokolwiek print(\"aborted\") else: # jeżeli specjalizujemy na czym chcemy", "1 wyjątek def get_password(self, alias): keepass_path = common.keepass_path if self.master", "są w nim zapisane file = val[0] try: servers =", "on') else: print('Status: off') except ParseArgsException as e: print(e) #", "databases: if view == 'tree': print(\"+-\", db) if view ==", "srv) for db in ConfigManager('config/' + conf + '.yaml').get(srv)['databases']: self.file_server_database.append(conf", "[] self.file_server = [] self.do_cd('.') configs = ConfigManager().get_config_list() for conf", "values == '': # wykonaj na wszystkich plikach files =", "KeePassError) as e1: try: command = connection[\"adress\"] + \"_\" +", "values, view = ''): # link - file.server.base if values", "command except KeyError as e2: if isinstance(e1,KeePassError): raise KeePassError(\"Unable to", "files: print('+-', file) servers = ConfigManager(\"config/\" + file + \".yaml\").get_all()", "l in list: r_list.append(l.replace('\"', '')) return (r_list, len(list)) else: raise", "+ '.' 
+ db) def precmd(self, line): if not sys.stdin.isatty():", "bazach które są w nim zapisane file = val[0] try:", "niej try: callback(val[0], val[1], val[2], *args) except ConfigManagerError as e:", "in databases: if view == 'tree': print(\"| | +-\", db)", "== 'tree': print('+-', file) try: servers = ConfigManager(\"config/\" + file", "if view == 'tree': print('+-', file) try: servers = ConfigManager(\"config/\"", "== 1: # jeżeli podano nazwę tylko pliku to wykonaj", "def do_warn(self, args): \"\"\"warn <on/off>\"\"\" try: (values, values_num) = self.parse_args(args,", "arg_counter <= m) or (arg_counter == n and m ==", "\"is not exist\") # zwraca skróconą ścieżkę do aktualnego katalogu", "ConfigManagerError as e: print(e) except KeyError as e: print(e, \"is", "(path) # autouzupełnienia dla cmd polecenia cd def complete_cd(self, text,", "1) if values_num == 1: if values[0] == 'on': print('Warnings", "callback, args, values, view = ''): # link - file.server.base", "== 0) or n == 0: r_list = [] for", "self.file_server_database.append(conf) self.file_server.append(conf) for srv in ConfigManager('config/' + conf + '.yaml').get_all():", "nazwę pliku i serwer to wykonaj na wszystkich bazach na", "master password\" if sys.stdin.isatty(): # jezeli jako shell p =", "keepass_path = common.keepass_path if self.master == None: raise KeePassError(\"Master Password", "self.master = p def do_exit(self, *args): return True def do_EOF(self,", "except KeyError as e2: if isinstance(e1,KeePassError): raise KeePassError(\"Unable to use", "configs: self.file_server_database.append(conf) self.file_server.append(conf) for srv in ConfigManager('config/' + conf +", "except (KeyError, KeePassError) as e1: try: command = connection[\"adress\"] +", "self.file_server_database = [] self.file_server = [] self.do_cd('.') configs = ConfigManager().get_config_list()", "for srv in ConfigManager('config/' + conf + '.yaml').get_all(): self.file_server_database.append(conf +", "True def emptyline(self): return False # Musimy wyłapać wszystko co", "+ \") or Password\") else: raise KeePassError(\"Invalid connection in yaml", "self.file_server_database.append(conf + '.' + srv) self.file_server.append(conf + '.' + srv)", "view == 'tree': print(\"| +-\", db) if view == 'list':", "line, begidx, endidx): if not text: completions = self.directories[:] else:", "args, values, view = ''): # link - file.server.base if", "ConfigManager('config/' + conf + '.yaml').get(srv)['databases']: self.file_server_database.append(conf + '.' 
+ srv", "ans == \"yes\": #wykonaj callback for file in files: if", "srv in servers: print('| +-', srv) databases = servers[srv][\"databases\"] for", "na serwerze file = val[0] try: servers = ConfigManager(\"config/\" +", "+-\", srv) databases = servers[srv][\"databases\"] for db in databases: if", "f.startswith(text)] return completions # polecenie cd - pozwala na przemieszczanie", "for l in list: r_list.append(l.replace('\"', '')) return (r_list, len(list)) else:", "== 1: if values[0] == 'on': print('Warnings on') self.warn =", "True: print('Status: on') else: print('Status: off') except ParseArgsException as e:", "#podaj tylko informację na czym callback zostałby wykonany for file", "import ConfigManager, ConfigManagerError common.init() class ParseArgsException(Exception): def __init__(self, msg): self.msg", "params == 1: # jeżeli podano nazwę tylko pliku to", "module = ''): cmd.Cmd.__init__(self) self.master = None if module ==", "start = path.find(separator) end = path.rfind(separator, 0, len(path)-1) if start", "in self.directories if f.startswith(text)] return completions # polecenie cd -", "serwerach, bazach które są w nim zapisane file = val[0]", "ścieżkę do aktualnego katalogu - funkcja pomocnicza def get_shortpath(self): path", "przemieszczanie się po katalogach def do_cd(self, args): \"Move to directory\"", "file = val[0] try: servers = ConfigManager(\"config/\" + file +", "ConfigManager().get_config_list() # pobierz listę plików konfiguracyjnych # wyświetl na czym", "+-\", db) if view == 'list': print('[', file, '->', srv,", "n and arg_counter <= m) or (arg_counter == n and", "parse_args(self, string=\"\", n=0, m=0): list = re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string) arg_counter =", "else: return (path) # autouzupełnienia dla cmd polecenia cd def", "+ \".yaml\").get_all() for srv in servers: if view == 'tree':", "else: # jeżeli specjalizujemy na czym chcemy wykonać val =", "# polecenie cd - pozwala na przemieszczanie się po katalogach", "f in self.directories if f.startswith(text)] return completions # polecenie cd", "co możliwe, nie ma pliku, zly master itp. 
i zwrocic", "konfiguracyjnych # wyświetl na czym będziesz wykonywać print(\"Exec on:\") for", "== 'tree': print(\"| | +-\", db) if view == 'list':", "polecenia cd def complete_cd(self, text, line, begidx, endidx): if not", "+ \"_\" + str(connection[\"remoteport\"]) + \"_\" + perm return command", "in servers: if view == 'tree': print(\"+-\", srv) databases =", "try: common.chdir(args) self.prompt = self.get_shortpath() + ' ' + self.prompt_sign", "if view == 'tree': print(\"| +-\", srv) databases = servers[srv][\"databases\"]", "if self.warn == True: print('Status: on') else: print('Status: off') except", "nazwę bazy - wykonaj polecenie dokładnie na niej try: callback(val[0],", "'tree': print(\"| | +-\", db) if view == 'list': print('[',", "cd - pozwala na przemieszczanie się po katalogach def do_cd(self,", "for srv in servers: if view == 'tree': print(\"| +-\",", "e: print(e) except KeyError as e: print(e, \"is not exist\")", "except KeyError as e: print(e, \"is not exist\") # zwraca", "+ e1.value + \") or Password\") else: raise KeePassError(\"Invalid connection", "\") or Password\") else: raise KeePassError(\"Invalid connection in yaml file\")", "if view == 'tree': print(\"| +-\", db) if view ==", "podano nazwę tylko pliku to wykonaj na wszystkich serwerach, bazach", "len(list); if (arg_counter >= n and arg_counter <= m) or", "dla keepasa def do_setMaster(self,args): \"Set master password\" if sys.stdin.isatty(): #", "msg): self.msg = msg class ModuleCore(cmd.Cmd): def __init__(self, module =", "postcmd(self, stop, line): if not sys.stdin.isatty(): print(\"\") return stop def", "# wykonaj na wszystkich plikach files = ConfigManager().get_config_list() # pobierz", "def do_cd(self, args): \"Move to directory\" if args == '':", "raise e def connect_command_builder(self,connection, perm): try: command = connection[\"adress\"] +", "len(val) if params == 1: # jeżeli podano nazwę tylko", "except ConfigManagerError as e: print(e) elif ans == \"info\": #podaj", "aktualnego katalogu def do_pwd(self, args): \"Print path\" print(common.get_cdir()) # pozwala", "configmanager import ConfigManager, ConfigManagerError common.init() class ParseArgsException(Exception): def __init__(self, msg):", "Password: ') else: p = sys.stdin.readline().rstrip() self.master = p def", "= '/' start = path.find(separator) end = path.rfind(separator, 0, len(path)-1)", "if values[0] == 'on': print('Warnings on') self.warn = True elif", "= values.split('.') #rozdzielamy nazwę_pliku.serwera.bazy params = len(val) if params ==", "wykonany for file in files: print('+-', file) servers = ConfigManager(\"config/\"", "#jeżeli nie zdecydujemy się na wykonanie czegokolwiek print(\"aborted\") else: #", "podaje pełną ścieżkę aktualnego katalogu def do_pwd(self, args): \"Print path\"", "self.prompt = self.get_shortpath() + ' ' + self.prompt_sign self.directories =", "self.get_password(connection[\"keepass\"]) + \"_\" + str(connection[\"sshport\"]) + \"_\" + str(connection[\"remoteport\"]) +", "msg class ModuleCore(cmd.Cmd): def __init__(self, module = ''): cmd.Cmd.__init__(self) self.master", "== 'tree': print(\"| +-\", db) if view == 'list': print('[',", "def complete_cd(self, text, line, begidx, endidx): if not text: completions", "args): \"List directory\" for name in os.listdir(common.get_cdir()): print(name) # podaje", "Password\") else: raise KeePassError(\"Invalid connection in yaml file\") raise KeePassError(e1)", "n == 0: r_list = [] for l in list:", "text: completions = self.directories[:] else: completions = [f for f", "+ 
db) def precmd(self, line): if not sys.stdin.isatty(): print(line) return", "# pozwala na decyzję czy chcemy wyświetlać warningi def do_warn(self,", "print(\"\") return stop def parse_args(self, string=\"\", n=0, m=0): list =", "return (path[0:start+1] + '...' + path[end:]) else: return (path) #", "'': # wykonaj na wszystkich plikach files = ConfigManager().get_config_list() #", "\"\"\"warn <on/off>\"\"\" try: (values, values_num) = self.parse_args(args, 0, 1) if", "KeePassError as e: raise e def connect_command_builder(self,connection, perm): try: command", "e: print(e, \"is not exist\") elif params == 3: #", "file in files: print('+-', file) servers = ConfigManager(\"config/\" + file", "stop, line): if not sys.stdin.isatty(): print(\"\") return stop def parse_args(self,", "elif params == 3: # podano nazwę pliku, serwer i", "list = re.findall('\"+.*\"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string) arg_counter = len(list); if (arg_counter >=", "ConfigManager(\"config/\" + file + \".yaml\").get_all() for srv in servers: print('|", "do aktualnego katalogu - funkcja pomocnicza def get_shortpath(self): path =", "srv in servers: if view == 'tree': print(\"| +-\", srv)", "na czym chcemy wykonać val = values.split('.') #rozdzielamy nazwę_pliku.serwera.bazy params", "+ \"_\" + str(connection[\"remoteport\"]) + \"_\" + perm except (KeyError,", "KeePassError, get_password from configmanager import ConfigManager, ConfigManagerError common.init() class ParseArgsException(Exception):", "+ perm except (KeyError, KeePassError) as e1: try: command =", "common.get_cdir() separator = '' if '\\\\' in path: separator =", "line def postcmd(self, stop, line): if not sys.stdin.isatty(): print(\"\") return", "Master Password: ') else: p = sys.stdin.readline().rstrip() self.master = p", "srv + '.' + db) def precmd(self, line): if not", "db) def precmd(self, line): if not sys.stdin.isatty(): print(line) return line", "pobierz listę plików konfiguracyjnych # wyświetl na czym będziesz wykonywać", "== \"yes\": #wykonaj callback for file in files: if view", "'on': print('Warnings on') self.warn = True elif values[0] == 'off':", "funkcja pomocnicza def get_shortpath(self): path = common.get_cdir() separator = ''", "sure? [NO/yes/info]: \") if ans == \"yes\": #wykonaj callback for", "= ConfigManager().get_config_list() # pobierz listę plików konfiguracyjnych # wyświetl na", "in os.listdir(common.get_cdir()): if os.path.isdir(os.path.join(common.get_cdir(), name)): self.directories.append(name) except FileNotFoundError as e:", "+ str(connection[\"remoteport\"]) + \"_\" + perm return command except KeyError", "']>' else: self.prompt_sign = '->' #defaults self.ruler = '-' #Completions", "bazy - wykonaj polecenie dokładnie na niej try: callback(val[0], val[1],", "values_num == 1: if values[0] == 'on': print('Warnings on') self.warn", "or (arg_counter == n and m == 0) or n", "view == 'tree': print(\"+-\", db) if view == 'list': print('[',", "srv, db, *args) except ConfigManagerError as e: print(e) except KeyError", "e2: if isinstance(e1,KeePassError): raise KeePassError(\"Unable to use Keepass(\" + e1.value", "= p def do_exit(self, *args): return True def do_EOF(self, line):", "'.' 
+ db) def precmd(self, line): if not sys.stdin.isatty(): print(line)", "db, ']') callback(file, srv, db, *args) except ConfigManagerError as e:", "get_password(keepass_path, self.master, alias) except KeePassError as e: raise e def", "getpass from kp import KeePassError, get_password from configmanager import ConfigManager,", "if module == '#': self.prompt_sign = '#>' elif module !=", "from getpass import getpass from kp import KeePassError, get_password from", "conf + '.yaml').get(srv)['databases']: self.file_server_database.append(conf + '.' + srv + '.'", "common.chdir(args) self.prompt = self.get_shortpath() + ' ' + self.prompt_sign self.directories", "1: # jeżeli podano nazwę tylko pliku to wykonaj na", "print('Warnings off') self.warn = False else: print('Incorrect argument.') else: if", "as e1: try: command = connection[\"adress\"] + \"_\" + connection[\"user\"]+" ]
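# Hypothetical usage sketch (not part of the recovered sources): a minimal
# subclass showing how a concrete command could combine parse_args() and
# exec_on_config(). The BackupModule class and the backup_db callback are
# illustrative assumptions, not code from the original project.
class BackupModule(ModuleCore):
    def __init__(self):
        super().__init__(module='backup')

    def do_backup(self, args):
        """backup [file[.server[.database]]] - run a backup over the selected scope"""
        try:
            (values, values_num) = self.parse_args(args, 0, 1)  # zero or one argument
        except ParseArgsException as e:
            print(e)
            return
        target = values[0] if values_num == 1 else ''

        def backup_db(file, srv, db):
            print("backing up", file, srv, db)  # placeholder for the real per-database work

        # run the callback on every matching file/server/database
        self.exec_on_config(backup_db, [], target, view='list')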
[ "test and trail function u = self._fes.TrialFunction() v = self._fes.TestFunction()", "solution {}\".format(fempde1.normL2())) print(\"solving took {} sec\".format(fempde1.exec_time)) print(\"solving uses {} Mb\".format(fempde1.mem_consumption/1000000))", "maximum number of degrees of freedom that can be created", "--------------------------------------------------------------------# if __name__ == \"__main__\": fempde1 = FemPde1(True) print(fempde1.pde_string) try:", "adaptive loop while self._fes.ndof < self.max_ndof: self._solveStep() self._estimateError() self._mesh.Refine() #", "import psutil import gc class FemPde1(FemPdeBase): \"\"\" **Implementation of PDE1", "solves the pde by calling ngsolve, provides: static condensation, adaptive", "3.830256175994873 >>> fempde2.mem_consumption 76705792 \"\"\" def __init__(self, show_gui, max_ndof=50000): super().__init__(show_gui)", "- 200y^9(1-y)^9 + 90y^{10}(1-y)^8] \\Omega: \\mathbf{x} \\in [0,1] u(\\mathbf{x})|_{\\partial \\Omega}", "numpy.ndarray with shape (2,) _mesh -> ngs.comp.Mesh _ngs_ex -> ngs.fem.CoefficientFunction", "\\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx # preconditioner: multigrid", "is numer of cores) with ngs.TaskManager(): # this is the", "between exact and approx solution also sets execution time and", "create mesh with initial size 0.1 self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create", "freedom that can be created in the adaptive mesh refinement,", "norm to the real solution {}\".format(fempde1.normL2())) print(\"solving took {} sec\".format(fempde1.exec_time))", "the internal variables for evaluating the exact solution and calculating", "tstart # set measured used memory memstop = process.memory_info().vms -", "u(\\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} - 200x^9(1-x)^9 + 90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} - 200y^9(1-y)^9", "= ngs.HDiv(self._mesh, order=2, autoupdate=True) self._gf_flux = ngs.GridFunction(self._space_flux, \"flux\", autoupdate=True) #", "and memory consumption Examples -------- >>> import numpy as np", "# create grid function that holds the solution and set", "self.show_gui: ngs.Draw(self._gfu) # set measured exectution time self._exec_time = time.time()", "\\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9", "--------------------------------------------------------------------# # measure how much memory is used until here", "1.01, 0.01) X, Y = np.meshgrid(x, y) zs0 = np.array([fempde1.exact(\\", "is the adaptive loop while self._fes.ndof < self.max_ndof: self._solveStep() self._estimateError()", "- 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8))", "from ngsolve import ngsolve as ngs from netgen.geom2d import unit_square", "# create mesh with initial size 0.1 self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1))", "ax.set_zlabel(\"f(X0, X1)\") plt.show() fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight') fig = plt.figure() ax =", ">>> fempde2.mem_consumption 76705792 \"\"\" def __init__(self, show_gui, max_ndof=50000): super().__init__(show_gui) #", "of cores) with ngs.TaskManager(): # this is the adaptive loop", "the exact solution and calculating the distance between exact 
and", "X1)\") plt.show() fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight') fig = plt.figure() ax = fig.add_subplot(111,", "space for flux calculation and estimate error self._space_flux = ngs.HDiv(self._mesh,", "- memstart self._mem_consumption = memstop # enable garbage collector #", "real solution {}\".format(fempde1.normL2())) print(\"solving took {} sec\".format(fempde1.exec_time)) print(\"solving uses {}", "self._fes.TrialFunction() v = self._fes.TestFunction() # create bilinear form and enable", "except: print(\"Î error message above\") try: fempde1.approx(np.array([0.5,0.5])) except: print(\"Î error", "int the maximum number of degrees of freedom that can", "- 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))\" self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) # init public", "u = self._fes.TrialFunction() v = self._fes.TestFunction() # create bilinear form", "@author: Nicolai \"\"\" import sys import os importpath = os.path.dirname(os.path.realpath(__file__))", "and apply RHS self._f = ngs.LinearForm(self._fes) self._f += ( \\", "\"\"\" import sys import os importpath = os.path.dirname(os.path.realpath(__file__)) + \"/../\"", "import Axes3D import matplotlib.pyplot as plt from matplotlib import cm", "\"flux\", autoupdate=True) # TaskManager starts threads that (standard thread nr", "ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0, X1)\") plt.show() fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight') fig = plt.figure()", "- \\Delta u(\\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} - 200x^9(1-x)^9 + 90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10}", "sec\".format(fempde1.exec_time)) print(\"solving uses {} Mb\".format(fempde1.mem_consumption/1000000)) from mpl_toolkits.mplot3d import Axes3D import", "self._mesh.Refine() # since the adaptive loop stopped with a mesh", "sys import os importpath = os.path.dirname(os.path.realpath(__file__)) + \"/../\" sys.path.append(importpath) from", "solution and calculating the distance between exact and approx solution", "collector # --------------------------------------------------------------------# gc.enable() gc.collect() # --------------------------------------------------------------------# if __name__ ==", "in gui if self.show_gui: ngs.Draw(self._gfu) # create Hcurl space for", "0.5) = {}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2 norm to the real solution {}\".format(fempde1.normL2()))", "ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) fig.tight_layout() ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0, X1)\") plt.show()", "ngs.TaskManager(): # this is the adaptive loop while self._fes.ndof <", "psutil.Process() memstart = process.memory_info().vms # starts timer tstart = time.time()", "[0,1] u(\\mathbf{x})|_{\\partial \\Omega} = 0 **with the solution:** .. 
math::", "memstart = process.memory_info().vms # starts timer tstart = time.time() if", "functional and apply RHS self._f = ngs.LinearForm(self._fes) self._f += (", "# calculated one last time self._solveStep() if self.show_gui: ngs.Draw(self._gfu) #", "{}\".format(fempde1.normL2())) print(\"solving took {} sec\".format(fempde1.exec_time)) print(\"solving uses {} Mb\".format(fempde1.mem_consumption/1000000)) from", "memory consumption Examples -------- >>> import numpy as np >>>", "= zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) fig.tight_layout() ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0,", "= memstop # enable garbage collector # --------------------------------------------------------------------# gc.enable() gc.collect()", "(2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) # init public self.max_ndof = max_ndof def solve(self): #", "in the adaptive mesh refinement, standard value is 50000 Methods", "-*- coding: utf-8 -*- \"\"\" Created on Mon Apr 13", "Y = np.meshgrid(x, y) zs0 = np.array([fempde1.exact(\\ np.array([x,y])) for x,y", "5.853102150391562e-07 >>> fempde2.exec_time 3.830256175994873 >>> fempde2.mem_consumption 76705792 \"\"\" def __init__(self,", "Z = zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) fig.tight_layout() ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\")", "condense=True) self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx # creat linear functional and apply", ">>> fempde2.normL2() 5.853102150391562e-07 >>> fempde2.exec_time 3.830256175994873 >>> fempde2.mem_consumption 76705792 \"\"\"", "garbage collector # --------------------------------------------------------------------# gc.disable() while(gc.isenabled()): time.sleep(0.1) # --------------------------------------------------------------------# #", "= FemPde2(True) >>> pos = np.array([0.5, 0.5]) >>> fempde2.exact(pos) >>>", "#create finite element space self._fes = ngs.H1(self._mesh, order=2, dirichlet=\".*\", autoupdate=True)", "ngs.fem.CoefficientFunction -> try to call solve() first >>> fempde2.solve() >>>", "for flux calculation and estimate error self._space_flux = ngs.HDiv(self._mesh, order=2,", "last time self._solveStep() if self.show_gui: ngs.Draw(self._gfu) # set measured exectution", ">>> fempde2.exact(pos) 1.0 >>> fempde2.approx(pos) 0.999998924259486 >>> fempde2.normL2() 5.853102150391562e-07 >>>", "y) zs0 = np.array([fempde1.exact(\\ np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])", "except: print(\"Î error message above\") fempde1.solve() print(\"-------------------------------------\") print(\"exact(0.5, 0.5) =", "self._f += ( \\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \\", "adaptive loop stopped with a mesh refinement, the gfu must", "prerequisits must the problem have? 
self._c = ngs.Preconditioner(self._a,\"multigrid\") # create", "utf-8 -*- \"\"\" Created on Mon Apr 13 14:57:32 2020", "Hcurl space for flux calculation and estimate error self._space_flux =", "(2,) _mesh -> ngs.comp.Mesh _ngs_ex -> ngs.fem.CoefficientFunction -> try to", "stopped with a mesh refinement, the gfu must be #", "took {} sec\".format(fempde1.exec_time)) print(\"solving uses {} Mb\".format(fempde1.mem_consumption/1000000)) from mpl_toolkits.mplot3d import", "= ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create finite element space self._fes = ngs.H1(self._mesh, order=2,", "-> ngs.fem.CoefficientFunction -> try to call solve() first >>> fempde2.solve()", "set measured used memory memstop = process.memory_info().vms - memstart self._mem_consumption", "# creat linear functional and apply RHS self._f = ngs.LinearForm(self._fes)", "apply RHS self._f = ngs.LinearForm(self._fes) self._f += ( \\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10", "print(\"exact(0.5, 0.5) = {}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5, 0.5) = {}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2 norm", "ngs.BilinearForm(self._fes, condense=True) self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx # creat linear functional and", "numpy as np # import from ngsolve import ngsolve as", "\"\"\" def __init__(self, show_gui, max_ndof=50000): super().__init__(show_gui) # init protected self._pde_string", "= ngs.GridFunction(self._fes, autoupdate=True) # solution self._g = 0.0 self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\"))", "(where possible), sets the internal variables for evaluating the exact", "the adaptive loop while self._fes.ndof < self.max_ndof: self._solveStep() self._estimateError() self._mesh.Refine()", "plt.figure() ax = fig.add_subplot(111, projection='3d') x = y = np.arange(0,", "sets the internal variables for evaluating the exact solution and", "netgen.gui # create mesh with initial size 0.1 self._mesh =", "pos = np.array([0.5, 0.5]) >>> fempde2.exact(pos) >>> x -> numpy.ndarray", "create grid function that holds the solution and set the", "= process.memory_info().vms # starts timer tstart = time.time() if self.show_gui:", "# since the adaptive loop stopped with a mesh refinement,", "+ 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx # preconditioner: multigrid - what prerequisits must", "math:: - \\Delta u(\\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} - 200x^9(1-x)^9 + 90x^{10}(1-x)^8]", "Nicolai \"\"\" import sys import os importpath = os.path.dirname(os.path.realpath(__file__)) +", "ngs.LinearForm(self._fes) self._f += ( \\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8))", "static condensation, adaptive mesh refinement, parallelisation (where possible), sets the", "-(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))\"", "-> try to call solve() first >>> fempde2.solve() >>> fempde2.exact(pos)", "self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True) self._gf_flux = ngs.GridFunction(self._space_flux, \"flux\", autoupdate=True)", "import sys import os importpath = os.path.dirname(os.path.realpath(__file__)) + \"/../\" sys.path.append(importpath)", "# this is the adaptive loop while self._fes.ndof < self.max_ndof:", "PDE1 of the testbed:** .. 
math:: - \\Delta u(\\mathbf{x}) =", "psutil import gc class FemPde1(FemPdeBase): \"\"\" **Implementation of PDE1 of", "and set the boundary to 0 self._gfu = ngs.GridFunction(self._fes, autoupdate=True)", "solution self._g = 0.0 self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\")) # draw grid function", "estimate error self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True) self._gf_flux = ngs.GridFunction(self._space_flux,", "threads that (standard thread nr is numer of cores) with", "ngs.grad(u)*ngs.grad(v)*ngs.dx # creat linear functional and apply RHS self._f =", "**with the solution:** .. math:: u(\\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes ----------", "fempde2.exec_time 3.830256175994873 >>> fempde2.mem_consumption 76705792 \"\"\" def __init__(self, show_gui, max_ndof=50000):", "try: fempde1.approx(np.array([0.5,0.5])) except: print(\"Î error message above\") fempde1.solve() print(\"-------------------------------------\") print(\"exact(0.5,", ">>> import numpy as np >>> fempde2 = FemPde2(True) >>>", "while self._fes.ndof < self.max_ndof: self._solveStep() self._estimateError() self._mesh.Refine() # since the", "print(\"approx(0.5, 0.5) = {}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2 norm to the real solution", "= 0 **with the solution:** .. math:: u(\\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10}", "_ngs_ex -> ngs.fem.CoefficientFunction -> try to call solve() first >>>", "the problem have? self._c = ngs.Preconditioner(self._a,\"multigrid\") # create grid function", "size 0.1 self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create finite element space self._fes", "calling ngsolve, provides: static condensation, adaptive mesh refinement, parallelisation (where", "refinement, standard value is 50000 Methods ------- solve() solves the", "u(\\mathbf{x})|_{\\partial \\Omega} = 0 **with the solution:** .. 
math:: u(\\mathbf{x})", "self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\")) # draw grid function in gui if self.show_gui:", "order=2, autoupdate=True) self._gf_flux = ngs.GridFunction(self._space_flux, \"flux\", autoupdate=True) # TaskManager starts", "Y, Z, cmap=cm.gnuplot) fig.tight_layout() ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0, X1)\") plt.show() fig.savefig(\"sol_pde_1.pdf\",", "coding: utf-8 -*- \"\"\" Created on Mon Apr 13 14:57:32", "with initial size 0.1 self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create finite element", "garbage collector # --------------------------------------------------------------------# gc.enable() gc.collect() # --------------------------------------------------------------------# if __name__", "process = psutil.Process() memstart = process.memory_info().vms # starts timer tstart", "thread nr is numer of cores) with ngs.TaskManager(): # this", "memstart self._mem_consumption = memstop # enable garbage collector # --------------------------------------------------------------------#", "grid function in gui if self.show_gui: ngs.Draw(self._gfu) # create Hcurl", "memstop = process.memory_info().vms - memstart self._mem_consumption = memstop # enable", "starts threads that (standard thread nr is numer of cores)", "mesh with initial size 0.1 self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create finite", "from FemPdeBase import FemPdeBase import numpy as np # import", "space self._fes = ngs.H1(self._mesh, order=2, dirichlet=\".*\", autoupdate=True) # test and", "zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) fig.tight_layout() ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0, X1)\")", "import cm fig = plt.figure() ax = fig.add_subplot(111, projection='3d') x", "from netgen.geom2d import unit_square import time import psutil import gc", "time self._exec_time = time.time() - tstart # set measured used", "init protected self._pde_string = \"-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 +", "fempde2.normL2() 5.853102150391562e-07 >>> fempde2.exec_time 3.830256175994873 >>> fempde2.mem_consumption 76705792 \"\"\" def", "from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib", "solve(self): # disable garbage collector # --------------------------------------------------------------------# gc.disable() while(gc.isenabled()): time.sleep(0.1)", "print(\"Î error message above\") fempde1.solve() print(\"-------------------------------------\") print(\"exact(0.5, 0.5) = {}\".format(fempde1.exact(np.array([0.5,0.5]))))", "fempde2.exact(pos) 1.0 >>> fempde2.approx(pos) 0.999998924259486 >>> fempde2.normL2() 5.853102150391562e-07 >>> fempde2.exec_time", "np.array([fempde1.approx(\\ np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))]) Z = zs0.reshape(X.shape)", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Apr", "provides: static condensation, adaptive mesh refinement, parallelisation (where possible), sets", "fempde2 = FemPde2(True) >>> pos = np.array([0.5, 0.5]) >>> fempde2.exact(pos)", "with shape (2,) _mesh -> ngs.comp.Mesh _ngs_ex -> ngs.fem.CoefficientFunction ->", "process.memory_info().vms - memstart self._mem_consumption = memstop # enable garbage collector", "np # import from ngsolve import ngsolve as ngs from", "== \"__main__\": fempde1 = FemPde1(True) print(fempde1.pde_string) try: fempde1.exact(np.array([0.5,0.5])) except: print(\"Î", "refinement, the gfu must be # calculated one last time", "self._g 
= 0.0 self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\")) # draw grid function in", "number of degrees of freedom that can be created in", "the testbed:** .. math:: - \\Delta u(\\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} -", "gc.enable() gc.collect() # --------------------------------------------------------------------# if __name__ == \"__main__\": fempde1 =", "os.path.dirname(os.path.realpath(__file__)) + \"/../\" sys.path.append(importpath) from FemPdeBase import FemPdeBase import numpy", "= {}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5, 0.5) = {}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2 norm to the", "-(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))\" self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) # init", "also sets execution time and memory consumption Examples -------- >>>", "enable static condensation self._a = ngs.BilinearForm(self._fes, condense=True) self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx", "projection='3d') x = y = np.arange(0, 1.01, 0.01) X, Y", "print(\"solving uses {} Mb\".format(fempde1.mem_consumption/1000000)) from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot", "above\") fempde1.solve() print(\"-------------------------------------\") print(\"exact(0.5, 0.5) = {}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5, 0.5) =", ">>> pos = np.array([0.5, 0.5]) >>> fempde2.exact(pos) >>> x ->", "loop stopped with a mesh refinement, the gfu must be", "np.ravel(Y))]) Z = zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\")", "# --------------------------------------------------------------------# gc.disable() while(gc.isenabled()): time.sleep(0.1) # --------------------------------------------------------------------# # measure how", "calculated one last time self._solveStep() if self.show_gui: ngs.Draw(self._gfu) # set", "adaptive mesh refinement, standard value is 50000 Methods ------- solve()", "import numpy as np >>> fempde2 = FemPde2(True) >>> pos", "fempde1.exact(np.array([0.5,0.5])) except: print(\"Î error message above\") try: fempde1.approx(np.array([0.5,0.5])) except: print(\"Î", "numpy as np >>> fempde2 = FemPde2(True) >>> pos =", "self._fes.ndof < self.max_ndof: self._solveStep() self._estimateError() self._mesh.Refine() # since the adaptive", "-*- \"\"\" Created on Mon Apr 13 14:57:32 2020 @author:", "condensation, adaptive mesh refinement, parallelisation (where possible), sets the internal", "calculation and estimate error self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True) self._gf_flux", "fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight') fig = plt.figure() ax = fig.add_subplot(111, projection='3d') x", "set the boundary to 0 self._gfu = ngs.GridFunction(self._fes, autoupdate=True) #", "import gc class FemPde1(FemPdeBase): \"\"\" **Implementation of PDE1 of the", "fempde1.approx(np.array([0.5,0.5])) except: print(\"Î error message above\") fempde1.solve() print(\"-------------------------------------\") print(\"exact(0.5, 0.5)", "by calling ngsolve, provides: static condensation, adaptive mesh refinement, parallelisation", "measured exectution time self._exec_time = time.time() - tstart # set", "with a mesh refinement, the gfu must be # calculated", "= (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) # init public self.max_ndof = max_ndof def solve(self):", "= max_ndof def solve(self): # disable garbage collector # 
--------------------------------------------------------------------#", "exectution time self._exec_time = time.time() - tstart # set measured", "def __init__(self, show_gui, max_ndof=50000): super().__init__(show_gui) # init protected self._pde_string =", "-> ngs.comp.Mesh _ngs_ex -> ngs.fem.CoefficientFunction -> try to call solve()", "x = y = np.arange(0, 1.01, 0.01) X, Y =", "RHS self._f = ngs.LinearForm(self._fes) self._f += ( \\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 -", "90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))\" self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) #", "importpath = os.path.dirname(os.path.realpath(__file__)) + \"/../\" sys.path.append(importpath) from FemPdeBase import FemPdeBase", "if self.show_gui: ngs.Draw(self._gfu) # set measured exectution time self._exec_time =", "since the adaptive loop stopped with a mesh refinement, the", "of degrees of freedom that can be created in the", "self.show_gui: ngs.Draw(self._gfu) # create Hcurl space for flux calculation and", "how much memory is used until here process = psutil.Process()", "approx solution also sets execution time and memory consumption Examples", "import from ngsolve import ngsolve as ngs from netgen.geom2d import", "TaskManager starts threads that (standard thread nr is numer of", "import ngsolve as ngs from netgen.geom2d import unit_square import time", "200y^9(1-y)^9 + 90y^{10}(1-y)^8] \\Omega: \\mathbf{x} \\in [0,1] u(\\mathbf{x})|_{\\partial \\Omega} =", "multigrid - what prerequisits must the problem have? self._c =", "import unit_square import time import psutil import gc class FemPde1(FemPdeBase):", "= plt.figure() ax = fig.add_subplot(111, projection='3d') x = y =", "holds the solution and set the boundary to 0 self._gfu", "set measured exectution time self._exec_time = time.time() - tstart #", "cmap=cm.gnuplot) fig.tight_layout() ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0, X1)\") plt.show() fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight') fig", "as np >>> fempde2 = FemPde2(True) >>> pos = np.array([0.5,", "solution:** .. 
math:: u(\\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes ---------- max_nodf: int", "= \"-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 -", "FemPdeBase import numpy as np # import from ngsolve import", "= FemPde1(True) print(fempde1.pde_string) try: fempde1.exact(np.array([0.5,0.5])) except: print(\"Î error message above\")", "fig.tight_layout() ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0, X1)\") plt.show() fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight') fig =", "# set measured exectution time self._exec_time = time.time() - tstart", "that holds the solution and set the boundary to 0", "print(\"L2 norm to the real solution {}\".format(fempde1.normL2())) print(\"solving took {}", "0.5) = {}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5, 0.5) = {}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2 norm to", "function u = self._fes.TrialFunction() v = self._fes.TestFunction() # create bilinear", "ngsolve import ngsolve as ngs from netgen.geom2d import unit_square import", "+= ngs.grad(u)*ngs.grad(v)*ngs.dx # creat linear functional and apply RHS self._f", "self._f = ngs.LinearForm(self._fes) self._f += ( \\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9", "gfu must be # calculated one last time self._solveStep() if", "definedon=self._mesh.Boundaries(\".*\")) # draw grid function in gui if self.show_gui: ngs.Draw(self._gfu)", "that can be created in the adaptive mesh refinement, standard", "pde by calling ngsolve, provides: static condensation, adaptive mesh refinement,", "ngs.Draw(self._gfu) # create Hcurl space for flux calculation and estimate", ".. math:: - \\Delta u(\\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} - 200x^9(1-x)^9 +", "# create bilinear form and enable static condensation self._a =", "= np.meshgrid(x, y) zs0 = np.array([fempde1.exact(\\ np.array([x,y])) for x,y in", "to call solve() first >>> fempde2.solve() >>> fempde2.exact(pos) 1.0 >>>", "# preconditioner: multigrid - what prerequisits must the problem have?", "memstop # enable garbage collector # --------------------------------------------------------------------# gc.enable() gc.collect() #", "FemPde1(True) print(fempde1.pde_string) try: fempde1.exact(np.array([0.5,0.5])) except: print(\"Î error message above\") try:", "= os.path.dirname(os.path.realpath(__file__)) + \"/../\" sys.path.append(importpath) from FemPdeBase import FemPdeBase import", "90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} - 200y^9(1-y)^9 + 90y^{10}(1-y)^8] \\Omega: \\mathbf{x} \\in [0,1]", "\\Omega} = 0 **with the solution:** .. 
math:: u(\\mathbf{x}) =", "and calculating the distance between exact and approx solution also", "90*y^10*(1-y)^8))\" self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) # init public self.max_ndof = max_ndof", ")*v*ngs.dx # preconditioner: multigrid - what prerequisits must the problem", "zip(np.ravel(X), np.ravel(Y))]) Z = zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) ax.set_xlabel(\"X0\")", "exact solution and calculating the distance between exact and approx", "cm fig = plt.figure() ax = fig.add_subplot(111, projection='3d') x =", "static condensation self._a = ngs.BilinearForm(self._fes, condense=True) self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx #", "linear functional and apply RHS self._f = ngs.LinearForm(self._fes) self._f +=", "200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx # preconditioner: multigrid - what prerequisits", "can be created in the adaptive mesh refinement, standard value", "be created in the adaptive mesh refinement, standard value is", "{}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5, 0.5) = {}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2 norm to the real", "self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution self._g = 0.0 self._gfu.Set(self._g,", "ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create finite element space self._fes = ngs.H1(self._mesh, order=2, dirichlet=\".*\",", "= psutil.Process() memstart = process.memory_info().vms # starts timer tstart =", "= time.time() - tstart # set measured used memory memstop", "Mon Apr 13 14:57:32 2020 @author: Nicolai \"\"\" import sys", "the adaptive mesh refinement, standard value is 50000 Methods -------", "= ngs.Preconditioner(self._a,\"multigrid\") # create grid function that holds the solution", "# enable garbage collector # --------------------------------------------------------------------# gc.enable() gc.collect() # --------------------------------------------------------------------#", "self._a = ngs.BilinearForm(self._fes, condense=True) self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx # creat linear", "and approx solution also sets execution time and memory consumption", "self.show_gui: import netgen.gui # create mesh with initial size 0.1", "\"\"\" Created on Mon Apr 13 14:57:32 2020 @author: Nicolai", "0.5]) >>> fempde2.exact(pos) >>> x -> numpy.ndarray with shape (2,)", "solution also sets execution time and memory consumption Examples --------", "used until here process = psutil.Process() memstart = process.memory_info().vms #", "- 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))\" self._ngs_ex", "creat linear functional and apply RHS self._f = ngs.LinearForm(self._fes) self._f", "must be # calculated one last time self._solveStep() if self.show_gui:", "fempde2.solve() >>> fempde2.exact(pos) 1.0 >>> fempde2.approx(pos) 0.999998924259486 >>> fempde2.normL2() 5.853102150391562e-07", "y = np.arange(0, 1.01, 0.01) X, Y = np.meshgrid(x, y)", "# measure how much memory is used until here process", "calculating the distance between exact and approx solution also sets", "draw grid function in gui if self.show_gui: ngs.Draw(self._gfu) # create", "Created on Mon Apr 13 14:57:32 2020 @author: Nicolai \"\"\"", "mesh refinement, parallelisation (where possible), sets the internal variables for", "--------------------------------------------------------------------# gc.enable() gc.collect() # 
--------------------------------------------------------------------# if __name__ == \"__main__\": fempde1", "fig.add_subplot(111, projection='3d') x = y = np.arange(0, 1.01, 0.01) X,", "element space self._fes = ngs.H1(self._mesh, order=2, dirichlet=\".*\", autoupdate=True) # test", "# create Hcurl space for flux calculation and estimate error", "autoupdate=True) # test and trail function u = self._fes.TrialFunction() v", "x,y in zip(np.ravel(X), np.ravel(Y))]) Z = zs0.reshape(X.shape) ax.plot_surface(X, Y, Z,", "error self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True) self._gf_flux = ngs.GridFunction(self._space_flux, \"flux\",", "import numpy as np # import from ngsolve import ngsolve", "------- solve() solves the pde by calling ngsolve, provides: static", "= self._fes.TrialFunction() v = self._fes.TestFunction() # create bilinear form and", "first >>> fempde2.solve() >>> fempde2.exact(pos) 1.0 >>> fempde2.approx(pos) 0.999998924259486 >>>", "error message above\") fempde1.solve() print(\"-------------------------------------\") print(\"exact(0.5, 0.5) = {}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5,", "of freedom that can be created in the adaptive mesh", "time and memory consumption Examples -------- >>> import numpy as", "the pde by calling ngsolve, provides: static condensation, adaptive mesh", "ngs.GridFunction(self._fes, autoupdate=True) # solution self._g = 0.0 self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\")) #", "14:57:32 2020 @author: Nicolai \"\"\" import sys import os importpath", "condensation self._a = ngs.BilinearForm(self._fes, condense=True) self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx # creat", "as plt from matplotlib import cm fig = plt.figure() ax", "starts timer tstart = time.time() if self.show_gui: import netgen.gui #", "np.meshgrid(x, y) zs0 = np.array([fempde1.approx(\\ np.array([x,y])) for x,y in zip(np.ravel(X),", "import netgen.gui # create mesh with initial size 0.1 self._mesh", "used memory memstop = process.memory_info().vms - memstart self._mem_consumption = memstop", "exact and approx solution also sets execution time and memory", "def solve(self): # disable garbage collector # --------------------------------------------------------------------# gc.disable() while(gc.isenabled()):", "the adaptive loop stopped with a mesh refinement, the gfu", "the real solution {}\".format(fempde1.normL2())) print(\"solving took {} sec\".format(fempde1.exec_time)) print(\"solving uses", "to the real solution {}\".format(fempde1.normL2())) print(\"solving took {} sec\".format(fempde1.exec_time)) print(\"solving", "= np.array([0.5, 0.5]) >>> fempde2.exact(pos) >>> x -> numpy.ndarray with", "self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx # creat linear functional and apply RHS", "= ngs.BilinearForm(self._fes, condense=True) self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx # creat linear functional", "= 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes ---------- max_nodf: int the maximum number of", "zip(np.ravel(X), np.ravel(Y))]) Z = zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) fig.tight_layout()", "sets execution time and memory consumption Examples -------- >>> import", "\"-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9", "measure how much memory is used until here process =", "sys.path.append(importpath) from FemPdeBase import FemPdeBase import numpy as np #", "= ngs.H1(self._mesh, order=2, dirichlet=\".*\", 
autoupdate=True) # test and trail function", "autoupdate=True) # TaskManager starts threads that (standard thread nr is", "< self.max_ndof: self._solveStep() self._estimateError() self._mesh.Refine() # since the adaptive loop", "{} Mb\".format(fempde1.mem_consumption/1000000)) from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt", "# --------------------------------------------------------------------# if __name__ == \"__main__\": fempde1 = FemPde1(True) print(fempde1.pde_string)", "90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx # preconditioner: multigrid - what prerequisits must the", "gc class FemPde1(FemPdeBase): \"\"\" **Implementation of PDE1 of the testbed:**", "# set measured used memory memstop = process.memory_info().vms - memstart", "Methods ------- solve() solves the pde by calling ngsolve, provides:", "and enable static condensation self._a = ngs.BilinearForm(self._fes, condense=True) self._a +=", "the solution:** .. math:: u(\\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes ---------- max_nodf:", "200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))\" self._ngs_ex =", "# init protected self._pde_string = \"-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9", "must the problem have? self._c = ngs.Preconditioner(self._a,\"multigrid\") # create grid", "finite element space self._fes = ngs.H1(self._mesh, order=2, dirichlet=\".*\", autoupdate=True) #", "max_nodf: int the maximum number of degrees of freedom that", "cores) with ngs.TaskManager(): # this is the adaptive loop while", "0 self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution self._g = 0.0", ">>> fempde2.exact(pos) >>> x -> numpy.ndarray with shape (2,) _mesh", "self._fes.TestFunction() # create bilinear form and enable static condensation self._a", "ngs.Preconditioner(self._a,\"multigrid\") # create grid function that holds the solution and", "self._solveStep() if self.show_gui: ngs.Draw(self._gfu) # set measured exectution time self._exec_time", "grid function that holds the solution and set the boundary", "np.ravel(Y))]) Z = zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) fig.tight_layout() ax.set_xlabel(\"X0\")", "init public self.max_ndof = max_ndof def solve(self): # disable garbage", "0.01) X, Y = np.meshgrid(x, y) zs0 = np.array([fempde1.exact(\\ np.array([x,y]))", "measured used memory memstop = process.memory_info().vms - memstart self._mem_consumption =", "try: fempde1.exact(np.array([0.5,0.5])) except: print(\"Î error message above\") try: fempde1.approx(np.array([0.5,0.5])) except:", "plt from matplotlib import cm fig = plt.figure() ax =", "__name__ == \"__main__\": fempde1 = FemPde1(True) print(fempde1.pde_string) try: fempde1.exact(np.array([0.5,0.5])) except:", "+ 90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} - 200y^9(1-y)^9 + 90y^{10}(1-y)^8] \\Omega: \\mathbf{x} \\in", "13 14:57:32 2020 @author: Nicolai \"\"\" import sys import os", "the gfu must be # calculated one last time self._solveStep()", "memory memstop = process.memory_info().vms - memstart self._mem_consumption = memstop #", "call solve() first >>> fempde2.solve() >>> fempde2.exact(pos) 1.0 >>> fempde2.approx(pos)", ">>> fempde2.solve() >>> fempde2.exact(pos) 1.0 >>> fempde2.approx(pos) 0.999998924259486 >>> fempde2.normL2()", "matplotlib.pyplot as plt from matplotlib import cm fig = plt.figure()", "self._fes = ngs.H1(self._mesh, order=2, dirichlet=\".*\", autoupdate=True) # test and trail", 
"50000 Methods ------- solve() solves the pde by calling ngsolve,", "create bilinear form and enable static condensation self._a = ngs.BilinearForm(self._fes,", "the maximum number of degrees of freedom that can be", "that (standard thread nr is numer of cores) with ngs.TaskManager():", "the boundary to 0 self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution", "zs0 = np.array([fempde1.approx(\\ np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))]) Z", "self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) # init public self.max_ndof = max_ndof def", "- 200x^9(1-x)^9 + 90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} - 200y^9(1-y)^9 + 90y^{10}(1-y)^8] \\Omega:", "gc.disable() while(gc.isenabled()): time.sleep(0.1) # --------------------------------------------------------------------# # measure how much memory", "memory is used until here process = psutil.Process() memstart =", "and trail function u = self._fes.TrialFunction() v = self._fes.TestFunction() #", "# starts timer tstart = time.time() if self.show_gui: import netgen.gui", "process.memory_info().vms # starts timer tstart = time.time() if self.show_gui: import", "- what prerequisits must the problem have? self._c = ngs.Preconditioner(self._a,\"multigrid\")", "-------- >>> import numpy as np >>> fempde2 = FemPde2(True)", "try to call solve() first >>> fempde2.solve() >>> fempde2.exact(pos) 1.0", "# disable garbage collector # --------------------------------------------------------------------# gc.disable() while(gc.isenabled()): time.sleep(0.1) #", "form and enable static condensation self._a = ngs.BilinearForm(self._fes, condense=True) self._a", "bbox_inches='tight') fig = plt.figure() ax = fig.add_subplot(111, projection='3d') x =", "fempde1 = FemPde1(True) print(fempde1.pde_string) try: fempde1.exact(np.array([0.5,0.5])) except: print(\"Î error message", "gc.collect() # --------------------------------------------------------------------# if __name__ == \"__main__\": fempde1 = FemPde1(True)", "ngsolve, provides: static condensation, adaptive mesh refinement, parallelisation (where possible),", "have? self._c = ngs.Preconditioner(self._a,\"multigrid\") # create grid function that holds", "boundary to 0 self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution self._g", "= np.arange(0, 1.01, 0.01) X, Y = np.meshgrid(x, y) zs0", "# test and trail function u = self._fes.TrialFunction() v =", "zs0 = np.array([fempde1.exact(\\ np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))]) Z", "netgen.geom2d import unit_square import time import psutil import gc class", "= zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0,X1)\") plt.show()", "disable garbage collector # --------------------------------------------------------------------# gc.disable() while(gc.isenabled()): time.sleep(0.1) # --------------------------------------------------------------------#", "# solution self._g = 0.0 self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\")) # draw grid", "= process.memory_info().vms - memstart self._mem_consumption = memstop # enable garbage", "with ngs.TaskManager(): # this is the adaptive loop while self._fes.ndof", "of the testbed:** .. math:: - \\Delta u(\\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10}", "preconditioner: multigrid - what prerequisits must the problem have? 
self._c", "the distance between exact and approx solution also sets execution", "# import from ngsolve import ngsolve as ngs from netgen.geom2d", "show_gui, max_ndof=50000): super().__init__(show_gui) # init protected self._pde_string = \"-laplacian(u(x)) =", "self._exec_time = time.time() - tstart # set measured used memory", "collector # --------------------------------------------------------------------# gc.disable() while(gc.isenabled()): time.sleep(0.1) # --------------------------------------------------------------------# # measure", "solution and set the boundary to 0 self._gfu = ngs.GridFunction(self._fes,", "+ 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))\" self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10)", "= y = np.arange(0, 1.01, 0.01) X, Y = np.meshgrid(x,", "200x^9(1-x)^9 + 90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} - 200y^9(1-y)^9 + 90y^{10}(1-y)^8] \\Omega: \\mathbf{x}", "time self._solveStep() if self.show_gui: ngs.Draw(self._gfu) # set measured exectution time", "loop while self._fes.ndof < self.max_ndof: self._solveStep() self._estimateError() self._mesh.Refine() # since", "ax = fig.add_subplot(111, projection='3d') x = y = np.arange(0, 1.01,", "for evaluating the exact solution and calculating the distance between", "is used until here process = psutil.Process() memstart = process.memory_info().vms", "+ 90*ngs.x**10*(1-ngs.x)**8)) \\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx #", "76705792 \"\"\" def __init__(self, show_gui, max_ndof=50000): super().__init__(show_gui) # init protected", "error message above\") try: fempde1.approx(np.array([0.5,0.5])) except: print(\"Î error message above\")", "0.0 self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\")) # draw grid function in gui if", "message above\") fempde1.solve() print(\"-------------------------------------\") print(\"exact(0.5, 0.5) = {}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5, 0.5)", "print(\"-------------------------------------\") print(\"exact(0.5, 0.5) = {}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5, 0.5) = {}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2", "= {}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2 norm to the real solution {}\".format(fempde1.normL2())) print(\"solving", "what prerequisits must the problem have? 
self._c = ngs.Preconditioner(self._a,\"multigrid\") #", "function that holds the solution and set the boundary to", "\"__main__\": fempde1 = FemPde1(True) print(fempde1.pde_string) try: fempde1.exact(np.array([0.5,0.5])) except: print(\"Î error", "protected self._pde_string = \"-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8))", "self._pde_string = \"-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10", "unit_square import time import psutil import gc class FemPde1(FemPdeBase): \"\"\"", "x -> numpy.ndarray with shape (2,) _mesh -> ngs.comp.Mesh _ngs_ex", "1.0 >>> fempde2.approx(pos) 0.999998924259486 >>> fempde2.normL2() 5.853102150391562e-07 >>> fempde2.exec_time 3.830256175994873", "FemPdeBase import FemPdeBase import numpy as np # import from", "fempde2.mem_consumption 76705792 \"\"\" def __init__(self, show_gui, max_ndof=50000): super().__init__(show_gui) # init", "gui if self.show_gui: ngs.Draw(self._gfu) # create Hcurl space for flux", "self._mem_consumption = memstop # enable garbage collector # --------------------------------------------------------------------# gc.enable()", "adaptive mesh refinement, parallelisation (where possible), sets the internal variables", "\\Omega: \\mathbf{x} \\in [0,1] u(\\mathbf{x})|_{\\partial \\Omega} = 0 **with the", "variables for evaluating the exact solution and calculating the distance", "order=2, dirichlet=\".*\", autoupdate=True) # test and trail function u =", "autoupdate=True) # solution self._g = 0.0 self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\")) # draw", "X, Y = np.meshgrid(x, y) zs0 = np.array([fempde1.approx(\\ np.array([x,y])) for", "one last time self._solveStep() if self.show_gui: ngs.Draw(self._gfu) # set measured", "200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx", "fempde1.solve() print(\"-------------------------------------\") print(\"exact(0.5, 0.5) = {}\".format(fempde1.exact(np.array([0.5,0.5])))) print(\"approx(0.5, 0.5) = {}\".format(fempde1.approx(np.array([0.5,0.5]))))", "(standard thread nr is numer of cores) with ngs.TaskManager(): #", "= np.array([fempde1.approx(\\ np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))]) Z =", "fempde2.exact(pos) >>> x -> numpy.ndarray with shape (2,) _mesh ->", "-(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 +", "autoupdate=True) self._gf_flux = ngs.GridFunction(self._space_flux, \"flux\", autoupdate=True) # TaskManager starts threads", "from matplotlib import cm fig = plt.figure() ax = fig.add_subplot(111,", "ngsolve as ngs from netgen.geom2d import unit_square import time import", "90*ngs.x**10*(1-ngs.x)**8)) \\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx # preconditioner:", "90y^{10}(1-y)^8] \\Omega: \\mathbf{x} \\in [0,1] u(\\mathbf{x})|_{\\partial \\Omega} = 0 **with", "is 50000 Methods ------- solve() solves the pde by calling", "fig = plt.figure() ax = fig.add_subplot(111, projection='3d') x = y", "problem have? 
self._c = ngs.Preconditioner(self._a,\"multigrid\") # create grid function that", "2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes ---------- max_nodf: int the maximum number of degrees", "if self.show_gui: import netgen.gui # create mesh with initial size", "bilinear form and enable static condensation self._a = ngs.BilinearForm(self._fes, condense=True)", "time.sleep(0.1) # --------------------------------------------------------------------# # measure how much memory is used", "time.time() if self.show_gui: import netgen.gui # create mesh with initial", "= fig.add_subplot(111, projection='3d') x = y = np.arange(0, 1.01, 0.01)", "\\Delta u(\\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} - 200x^9(1-x)^9 + 90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} -", "= 0.0 self._gfu.Set(self._g, definedon=self._mesh.Boundaries(\".*\")) # draw grid function in gui", "this is the adaptive loop while self._fes.ndof < self.max_ndof: self._solveStep()", "import matplotlib.pyplot as plt from matplotlib import cm fig =", "the solution and set the boundary to 0 self._gfu =", "**Implementation of PDE1 of the testbed:** .. math:: - \\Delta", "{}\".format(fempde1.approx(np.array([0.5,0.5])))) print(\"L2 norm to the real solution {}\".format(fempde1.normL2())) print(\"solving took", "while(gc.isenabled()): time.sleep(0.1) # --------------------------------------------------------------------# # measure how much memory is", "self.max_ndof: self._solveStep() self._estimateError() self._mesh.Refine() # since the adaptive loop stopped", "refinement, parallelisation (where possible), sets the internal variables for evaluating", "mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import", "initial size 0.1 self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create finite element space", "testbed:** .. 
math:: - \\Delta u(\\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} - 200x^9(1-x)^9", "os importpath = os.path.dirname(os.path.realpath(__file__)) + \"/../\" sys.path.append(importpath) from FemPdeBase import", "here process = psutil.Process() memstart = process.memory_info().vms # starts timer", "matplotlib import cm fig = plt.figure() ax = fig.add_subplot(111, projection='3d')", "= np.array([fempde1.exact(\\ np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))]) Z =", "fempde2.approx(pos) 0.999998924259486 >>> fempde2.normL2() 5.853102150391562e-07 >>> fempde2.exec_time 3.830256175994873 >>> fempde2.mem_consumption", "mesh refinement, standard value is 50000 Methods ------- solve() solves", "shape (2,) _mesh -> ngs.comp.Mesh _ngs_ex -> ngs.fem.CoefficientFunction -> try", "-(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx # preconditioner: multigrid -", "np.array([0.5, 0.5]) >>> fempde2.exact(pos) >>> x -> numpy.ndarray with shape", "evaluating the exact solution and calculating the distance between exact", "degrees of freedom that can be created in the adaptive", "2020 @author: Nicolai \"\"\" import sys import os importpath =", "__init__(self, show_gui, max_ndof=50000): super().__init__(show_gui) # init protected self._pde_string = \"-laplacian(u(x))", "solve() solves the pde by calling ngsolve, provides: static condensation,", "ngs.GridFunction(self._space_flux, \"flux\", autoupdate=True) # TaskManager starts threads that (standard thread", "# init public self.max_ndof = max_ndof def solve(self): # disable", "Mb\".format(fempde1.mem_consumption/1000000)) from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from", "much memory is used until here process = psutil.Process() memstart", "max_ndof def solve(self): # disable garbage collector # --------------------------------------------------------------------# gc.disable()", "\"\"\" **Implementation of PDE1 of the testbed:** .. 
math:: -", "y) zs0 = np.array([fempde1.approx(\\ np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])", "np.meshgrid(x, y) zs0 = np.array([fempde1.exact(\\ np.array([x,y])) for x,y in zip(np.ravel(X),", "\"/../\" sys.path.append(importpath) from FemPdeBase import FemPdeBase import numpy as np", "consumption Examples -------- >>> import numpy as np >>> fempde2", "= ngs.GridFunction(self._space_flux, \"flux\", autoupdate=True) # TaskManager starts threads that (standard", ">>> fempde2.exec_time 3.830256175994873 >>> fempde2.mem_consumption 76705792 \"\"\" def __init__(self, show_gui,", "if self.show_gui: ngs.Draw(self._gfu) # create Hcurl space for flux calculation", "a mesh refinement, the gfu must be # calculated one", "standard value is 50000 Methods ------- solve() solves the pde", "in zip(np.ravel(X), np.ravel(Y))]) Z = zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)", "( \\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 -", "ngs.H1(self._mesh, order=2, dirichlet=\".*\", autoupdate=True) # test and trail function u", "be # calculated one last time self._solveStep() if self.show_gui: ngs.Draw(self._gfu)", "print(\"Î error message above\") try: fempde1.approx(np.array([0.5,0.5])) except: print(\"Î error message", "np.array([fempde1.exact(\\ np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))]) Z = zs0.reshape(X.shape)", "Examples -------- >>> import numpy as np >>> fempde2 =", "v = self._fes.TestFunction() # create bilinear form and enable static", "trail function u = self._fes.TrialFunction() v = self._fes.TestFunction() # create", "of PDE1 of the testbed:** .. math:: - \\Delta u(\\mathbf{x})", "u(\\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes ---------- max_nodf: int the maximum number", "self._estimateError() self._mesh.Refine() # since the adaptive loop stopped with a", "Y = np.meshgrid(x, y) zs0 = np.array([fempde1.approx(\\ np.array([x,y])) for x,y", "print(fempde1.pde_string) try: fempde1.exact(np.array([0.5,0.5])) except: print(\"Î error message above\") try: fempde1.approx(np.array([0.5,0.5]))", "-2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} - 200y^9(1-y)^9 + 90y^{10}(1-y)^8] \\Omega: \\mathbf{x} \\in [0,1] u(\\mathbf{x})|_{\\partial", "+ 90*y^10*(1-y)^8))\" self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) # init public self.max_ndof =", "distance between exact and approx solution also sets execution time", "# draw grid function in gui if self.show_gui: ngs.Draw(self._gfu) #", "above\") try: fempde1.approx(np.array([0.5,0.5])) except: print(\"Î error message above\") fempde1.solve() print(\"-------------------------------------\")", "nr is numer of cores) with ngs.TaskManager(): # this is", "Z, cmap=cm.gnuplot) fig.tight_layout() ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0, X1)\") plt.show() fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight')", "and estimate error self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True) self._gf_flux =", "time.time() - tstart # set measured used memory memstop =", "200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))\" self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10) # init public self.max_ndof", "FemPde1(FemPdeBase): \"\"\" **Implementation of PDE1 of the testbed:** .. 
math::", "self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create finite element space self._fes = ngs.H1(self._mesh,", "_mesh -> ngs.comp.Mesh _ngs_ex -> ngs.fem.CoefficientFunction -> try to call", ".. math:: u(\\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes ---------- max_nodf: int the", "# --------------------------------------------------------------------# gc.enable() gc.collect() # --------------------------------------------------------------------# if __name__ == \"__main__\":", "math:: u(\\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes ---------- max_nodf: int the maximum", "Z = zs0.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap=cm.gnuplot) ax.set_xlabel(\"X0\") ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0,X1)\")", "{} sec\".format(fempde1.exec_time)) print(\"solving uses {} Mb\".format(fempde1.mem_consumption/1000000)) from mpl_toolkits.mplot3d import Axes3D", "FemPde2(True) >>> pos = np.array([0.5, 0.5]) >>> fempde2.exact(pos) >>> x", "\\mathbf{x} \\in [0,1] u(\\mathbf{x})|_{\\partial \\Omega} = 0 **with the solution:**", "= np.meshgrid(x, y) zs0 = np.array([fempde1.approx(\\ np.array([x,y])) for x,y in", "internal variables for evaluating the exact solution and calculating the", "+ \"/../\" sys.path.append(importpath) from FemPdeBase import FemPdeBase import numpy as", "# --------------------------------------------------------------------# # measure how much memory is used until", "+ 90y^{10}(1-y)^8] \\Omega: \\mathbf{x} \\in [0,1] u(\\mathbf{x})|_{\\partial \\Omega} = 0", "- tstart # set measured used memory memstop = process.memory_info().vms", "as ngs from netgen.geom2d import unit_square import time import psutil", "until here process = psutil.Process() memstart = process.memory_info().vms # starts", "-2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} - 200x^9(1-x)^9 + 90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} - 200y^9(1-y)^9 + 90y^{10}(1-y)^8]", "public self.max_ndof = max_ndof def solve(self): # disable garbage collector", "for x,y in zip(np.ravel(X), np.ravel(Y))]) Z = zs0.reshape(X.shape) ax.plot_surface(X, Y,", "print(\"solving took {} sec\".format(fempde1.exec_time)) print(\"solving uses {} Mb\".format(fempde1.mem_consumption/1000000)) from mpl_toolkits.mplot3d", "super().__init__(show_gui) # init protected self._pde_string = \"-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 -", "self._c = ngs.Preconditioner(self._a,\"multigrid\") # create grid function that holds the", "value is 50000 Methods ------- solve() solves the pde by", "ax.set_ylabel(\"X1\") ax.set_zlabel(\"f(X0, X1)\") plt.show() fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight') fig = plt.figure() ax", "create Hcurl space for flux calculation and estimate error self._space_flux", "parallelisation (where possible), sets the internal variables for evaluating the", "flux calculation and estimate error self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True)", "dirichlet=\".*\", autoupdate=True) # test and trail function u = self._fes.TrialFunction()", "on Mon Apr 13 14:57:32 2020 @author: Nicolai \"\"\" import", "= ngs.LinearForm(self._fes) self._f += ( \\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 +", "plt.show() fig.savefig(\"sol_pde_1.pdf\", bbox_inches='tight') fig = plt.figure() ax = fig.add_subplot(111, projection='3d')", "-> numpy.ndarray with shape (2,) _mesh -> ngs.comp.Mesh _ngs_ex ->", "0 **with the solution:** .. 
math:: u(\\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10} Attributes", "time import psutil import gc class FemPde1(FemPdeBase): \"\"\" **Implementation of", ">>> fempde2.approx(pos) 0.999998924259486 >>> fempde2.normL2() 5.853102150391562e-07 >>> fempde2.exec_time 3.830256175994873 >>>", "1.01, 0.01) X, Y = np.meshgrid(x, y) zs0 = np.array([fempde1.approx(\\", "\\in [0,1] u(\\mathbf{x})|_{\\partial \\Omega} = 0 **with the solution:** ..", "execution time and memory consumption Examples -------- >>> import numpy", "# TaskManager starts threads that (standard thread nr is numer", "class FemPde1(FemPdeBase): \"\"\" **Implementation of PDE1 of the testbed:** ..", "solve() first >>> fempde2.solve() >>> fempde2.exact(pos) 1.0 >>> fempde2.approx(pos) 0.999998924259486", "Apr 13 14:57:32 2020 @author: Nicolai \"\"\" import sys import", "ngs.Draw(self._gfu) # set measured exectution time self._exec_time = time.time() -", "import os importpath = os.path.dirname(os.path.realpath(__file__)) + \"/../\" sys.path.append(importpath) from FemPdeBase", "ngs.HDiv(self._mesh, order=2, autoupdate=True) self._gf_flux = ngs.GridFunction(self._space_flux, \"flux\", autoupdate=True) # TaskManager", ">>> x -> numpy.ndarray with shape (2,) _mesh -> ngs.comp.Mesh", "import FemPdeBase import numpy as np # import from ngsolve", "max_ndof=50000): super().__init__(show_gui) # init protected self._pde_string = \"-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10", "+= ( \\ -(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \\ -(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10", "0.999998924259486 >>> fempde2.normL2() 5.853102150391562e-07 >>> fempde2.exec_time 3.830256175994873 >>> fempde2.mem_consumption 76705792", "- 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx # preconditioner: multigrid - what", "np.arange(0, 1.01, 0.01) X, Y = np.meshgrid(x, y) zs0 =", "ngs from netgen.geom2d import unit_square import time import psutil import", "numer of cores) with ngs.TaskManager(): # this is the adaptive", "import time import psutil import gc class FemPde1(FemPdeBase): \"\"\" **Implementation", "np >>> fempde2 = FemPde2(True) >>> pos = np.array([0.5, 0.5])", "--------------------------------------------------------------------# gc.disable() while(gc.isenabled()): time.sleep(0.1) # --------------------------------------------------------------------# # measure how much", "mesh refinement, the gfu must be # calculated one last", "as np # import from ngsolve import ngsolve as ngs", ">>> fempde2 = FemPde2(True) >>> pos = np.array([0.5, 0.5]) >>>", "self._gf_flux = ngs.GridFunction(self._space_flux, \"flux\", autoupdate=True) # TaskManager starts threads that", "if __name__ == \"__main__\": fempde1 = FemPde1(True) print(fempde1.pde_string) try: fempde1.exact(np.array([0.5,0.5]))", "---------- max_nodf: int the maximum number of degrees of freedom", "created in the adaptive mesh refinement, standard value is 50000", "X, Y = np.meshgrid(x, y) zs0 = np.array([fempde1.exact(\\ np.array([x,y])) for", "self._solveStep() self._estimateError() self._mesh.Refine() # since the adaptive loop stopped with", "timer tstart = time.time() if self.show_gui: import netgen.gui # create", "0.1 self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1)) #create finite element space self._fes =", "enable garbage collector # --------------------------------------------------------------------# gc.enable() gc.collect() # 
--------------------------------------------------------------------# if", "to 0 self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution self._g =", "0.01) X, Y = np.meshgrid(x, y) zs0 = np.array([fempde1.approx(\\ np.array([x,y]))", "= -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 +", "possible), sets the internal variables for evaluating the exact solution", "= self._fes.TestFunction() # create bilinear form and enable static condensation", "np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))]) Z = zs0.reshape(X.shape) ax.plot_surface(X,", "tstart = time.time() if self.show_gui: import netgen.gui # create mesh", "Axes3D import matplotlib.pyplot as plt from matplotlib import cm fig", "Attributes ---------- max_nodf: int the maximum number of degrees of", "message above\") try: fempde1.approx(np.array([0.5,0.5])) except: print(\"Î error message above\") fempde1.solve()", "function in gui if self.show_gui: ngs.Draw(self._gfu) # create Hcurl space", "= -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10} - 200x^9(1-x)^9 + 90x^{10}(1-x)^8] -2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10} - 200y^9(1-y)^9 +", "self.max_ndof = max_ndof def solve(self): # disable garbage collector #", "ngs.comp.Mesh _ngs_ex -> ngs.fem.CoefficientFunction -> try to call solve() first", "= time.time() if self.show_gui: import netgen.gui # create mesh with", "uses {} Mb\".format(fempde1.mem_consumption/1000000)) from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as" ]
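Because the exact solution of PDE1 has the closed form u(x, y) = 2^40 x^10 (1-x)^10 y^10 (1-y)^10, the solver output can be sanity-checked without ngsolve at all. The sketch below uses plain numpy only; the helper name exact_u is illustrative and not part of the class above. It confirms that the formula vanishes on the boundary of the unit square and peaks at exactly 1.0 in the centre, which is the value the docstring example reports for exact(np.array([0.5, 0.5])).

import numpy as np

def exact_u(x, y):
    # illustrative helper, not part of FemPde1/FemPdeBase:
    # u(x, y) = 2^40 * x^10 (1-x)^10 * y^10 (1-y)^10
    return 2.0**40 * (x**10) * ((1 - x)**10) * (y**10) * ((1 - y)**10)

print(exact_u(0.5, 0.5))   # 1.0 at the centre (2^40 * 0.5^40 = 1)
print(exact_u(0.0, 0.7))   # 0.0 on the boundary

# coarse grid check that the maximum over [0,1]^2 is attained at (0.5, 0.5)
xs = np.linspace(0.0, 1.0, 101)
X, Y = np.meshgrid(xs, xs)
U = exact_u(X, Y)
i, j = np.unravel_index(np.argmax(U), U.shape)
print(X[i, j], Y[i, j], U[i, j])   # 0.5 0.5 1.0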
[ "2.7788, 3.4617, 4.1677, 4.6873, 3.9528, 1.7051, 2.6228, 4.7419, 4.6676, \"\"\")", "-1 0 1 0 0 1 ; 0 0 0", "in range(12): #plot edges for j in range(i): if Ad[i,j]", "in range(i): if Ad[i,j] == 1: pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1],", "coordinates # each row is node x-y coordinates XY =", "\"\"\"); # node adjacency matrix Ad = -A*A.T Ad =", "= time.time() TIME += toc - tic ANSWERS.append(val) pass #print", "0 -1 1 0 0 0 0 0 0 0", "edges (transmission lines) k = 4 # number of generators", "0 0 0 0 0 0 0 ; 0 0", "toc - tic ANSWERS.append(val) pass #print val pass #print g.value", "0 0 0 0 0 0 0 -1 -1 0", "to generate the network graph # x-y coordinates # each", "-1 0 0 0 0 0 0 -1 ; 0", "np.matrix(\"\"\" -1 -1 0 0 0 0 0 0 0", "0 ; 0 0 0 0 0 1 0 0", "0 0 0 0 0 0 -1 -1 0 0", "#plot edges for j in range(i): if Ad[i,j] == 1:", "Variable(k) p = Variable(m) obj = Minimize(c.T*g) constraints = [A*p", "0 0 0 0 0 0 0 -1 1 0", "= 0 Pmax = np.matrix(\"\"\" 4.8005, 1.9246, 3.4274, 2.9439, 4.5652,", "Ad = -A*A.T Ad = Ad - np.diag(np.diag(Ad)) epsx =", "pass #print g.value # N - 1 contingency g =", "i in range(12): #plot edges for j in range(i): if", "g <= Gmax] prob = Problem(obj, constraints) tic = time.time()", "0 1 -1 0 0 0 0 0 0 0", "0 0 0 1 1 0 0 0 -1 0", "obj = Minimize(c.T*g) constraints = [A*p == vstack(-g, d.T), abs(p)", "i in range(m): flows.append(Variable(m)) constraints = [g <= Gmax, 0", "0 ; 0 0 0 0 0 0 0 0", "= np.matrix(\"4; 8; 5; 3\") # supply generator costs d", "0 -1 1 0 \"\"\") g = Variable(k) p =", "1.3041 \"\"\")# network power demands # graph incidence matrix A", "; 0 0 0 0 0 1 0 0 0", "TIME = 0 Pmax = np.matrix(\"\"\" 4.8005, 1.9246, 3.4274, 2.9439,", "val pass #print g.value # the code below is not", "0 0 0 ; 0 0 0 0 0 0", "maximum generator power c = np.matrix(\"4; 8; 5; 3\") #", "0 0 0 -1 1 0 0 0 0 0", "= Variable(k) p = Variable(m) obj = Minimize(c.T*g) constraints =", "#print g.value # N - 1 contingency g = Variable(k)", "Ad - np.diag(np.diag(Ad)) epsx = 0.05; epsy = 0.15; #", "below is not data for the problem # it is", "0 <= g] for i in range(m): # N -1", "np.matrix(\"\"\" 1.5 5.2; 4.9 5; 6.9 3.5; 1.9 3.5; 0.2", "0 0 0 0 0 0 0 0 0 1", "3.2 4.8; 5.9 4.5; 3.9 3.6; 5.9 2.5; 3.9 3;", "d.T)) constraints.append( flows[i][i] == 0 ) constraints.append( abs(flows[i]) <= Pmax.T", "text placing offset # plotting import matplotlib.pyplot as plt for", "0 0 0 1 0 0 0 0 -1 1", "range(m): # N -1 redundancy constraints.append(A*flows[i] == vstack(-g, d.T)) constraints.append(", "0 ; 0 1 1 0 -1 0 1 -1", "= 0.15; # text placing offset # plotting import matplotlib.pyplot", "0 -1 ; 0 0 0 0 0 0 -1", "code below is not data for the problem # it", "for i in range(m): # N -1 redundancy constraints.append(A*flows[i] ==", "4.8; 5.9 4.5; 3.9 3.6; 5.9 2.5; 3.9 3; 1.4", "plt for i in range(12): #plot edges for j in", "0 -1 -1 0 0 0 0 0 0 -1", "[] obj = Minimize(c.T*g) for i in range(m): flows.append(Variable(m)) constraints", "# node adjacency matrix Ad = -A*A.T Ad = Ad", "0 0 0 -1 1 0 \"\"\") g = Variable(k)", "0 ; 0 0 0 0 0 0 0 1", "Minimize(c.T*g) for i in range(m): flows.append(Variable(m)) constraints = [g <=", "0 0 0 0 1 0 1 0 0 ;", "= np.matrix(\"\"\" -1 -1 0 0 0 0 0 0", "pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for j in range(k,n): pass #plt.plot(XY[j,0],XY[j,1],'ko') pass #plt.axis('off')", "0 0 0 0 -1 -1 0 0 0 0", "0 0 0 0 0 0 ; 0 0 0", "flow problem import numpy as np n = 12 #", "0 0 0 0 0 0 ; 0 0 -1", "0 0 -1 ; 0 0 0 0 0 0", "constraints = [g <= Gmax, 0 <= g] for i", "it 
is used only to generate the network graph #", "constraints.append( flows[i][i] == 0 ) constraints.append( abs(flows[i]) <= Pmax.T )", "not data for the problem # it is used only", "markersize=12) pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for j in range(k,n): pass #plt.plot(XY[j,0],XY[j,1],'ko') pass", "== vstack(-g, d.T)) constraints.append( flows[i][i] == 0 ) constraints.append( abs(flows[i])", "- 1 contingency g = Variable(k) flows = [] obj", "for i in range(12): #plot edges for j in range(i):", "\"\"\")# network power demands # graph incidence matrix A =", "0 1 0 0 1 ; 0 0 0 0", "4.5; 3.9 3.6; 5.9 2.5; 3.9 3; 1.4 2.5; 0", "pass #print val pass #print g.value # the code below", "0 0 0 ; 0 0 -1 -1 0 0", "flows = [] obj = Minimize(c.T*g) for i in range(m):", "in range(m): flows.append(Variable(m)) constraints = [g <= Gmax, 0 <=", "matrix Ad = -A*A.T Ad = Ad - np.diag(np.diag(Ad)) epsx", "import time # data for power flow problem import numpy", "1.6154 2.3405 1.0868 1.5293 2.2197 1.0148 1.2083 1.3041 \"\"\")# network", "0 1 1 0 0 0 -1 0 1 0", "pass #plt.plot(XY[j,0],XY[j,1],'ko') pass #plt.axis('off') pass #plt.savefig('pwr_net.eps') if __name__ == '__main__':", "vstack(-g, d.T)) constraints.append( flows[i][i] == 0 ) constraints.append( abs(flows[i]) <=", "0 0 0 0 0 0 1 1 0 0", "g.value # the code below is not data for the", "0 0 0 0 0 -1 ; 0 0 0", "is node x-y coordinates XY = np.matrix(\"\"\" 1.5 5.2; 4.9", "0 -1 0 ; 1 0 0 0 1 -1", "== 1: pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-') for j", "TIME += toc - tic ANSWERS.append(val) pass #print val pass", "-1 1 0 \"\"\") g = Variable(k) p = Variable(m)", "2.6228, 4.7419, 4.6676, \"\"\") Gmax = np.matrix(\"3; 2; 4; 7\")", "<= Pmax.T, 0 <= g, g <= Gmax] prob =", "Gmax = np.matrix(\"3; 2; 4; 7\") # maximum generator power", "<= g, g <= Gmax] prob = Problem(obj, constraints) tic", "offset # plotting import matplotlib.pyplot as plt for i in", "for j in range(k,n): pass #plt.plot(XY[j,0],XY[j,1],'ko') pass #plt.axis('off') pass #plt.savefig('pwr_net.eps')", "val = prob.solve() toc = time.time() TIME += toc -", "4.4; 3.2 4.8; 5.9 4.5; 3.9 3.6; 5.9 2.5; 3.9", "is not data for the problem # it is used", "0 0 ; 0 0 0 0 0 0 0", "np.matrix(\"4; 8; 5; 3\") # supply generator costs d =", "0 0 0 0 0 -1 -1 0 0 0", "0 0 0 0 -1 ; 0 0 0 0", "+= toc - tic ANSWERS.append(val) pass #print val pass #print", "import numpy as np n = 12 # total number", "2.5; 3.9 3; 1.4 2.5; 0 3 \"\"\"); # node", "#plot nodes pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12) pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for j in", "1.0148 1.2083 1.3041 \"\"\")# network power demands # graph incidence", "8; 5; 3\") # supply generator costs d = np.matrix(\"\"\"", "0 0 0 0 ; 0 0 0 0 0", "0 0 0 0 0 0 0 0 0 ;", "3.9 3; 1.4 2.5; 0 3 \"\"\"); # node adjacency", "= TIME = 0 Pmax = np.matrix(\"\"\" 4.8005, 1.9246, 3.4274,", "0 Pmax = np.matrix(\"\"\" 4.8005, 1.9246, 3.4274, 2.9439, 4.5652, 4.0484,", "1.7051, 2.6228, 4.7419, 4.6676, \"\"\") Gmax = np.matrix(\"3; 2; 4;", "prob.solve() toc = time.time() TIME += toc - tic ANSWERS.append(val)", "0 0 0 0 0 0 0 -1 0 -1", "graph incidence matrix A = np.matrix(\"\"\" -1 -1 0 0", "0 1 0 0 0 0 0 0 0 0", "m = 18 # number of edges (transmission lines) k", "transmission line capacities = TIME = 0 Pmax = np.matrix(\"\"\"", "1.9246, 3.4274, 2.9439, 4.5652, 4.0484, 2.8259, 1.0740, 4.2856, 2.7788, 3.4617,", "4.1677, 4.6873, 3.9528, 1.7051, 2.6228, 4.7419, 4.6676, \"\"\") Gmax =", "ANSWERS.append(val) pass #print val pass #print 
g.value # N -", "-1 0 0 0 0 0 0 0 -1 0", "0 0 0 0 1 -1 0 0 0 0", "0 0 0 0 0 0 0 0 -1 1", "x-y coordinates XY = np.matrix(\"\"\" 1.5 5.2; 4.9 5; 6.9", "= 4 # number of generators # transmission line capacities", "n = 12 # total number of nodes m =", "0 \"\"\") g = Variable(k) p = Variable(m) obj =", "0 -1 0 0 0 0 0 0 0 -1", "copy import time # data for power flow problem import", "XY = np.matrix(\"\"\" 1.5 5.2; 4.9 5; 6.9 3.5; 1.9", "Ad[i,j] == 1: pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-') for", "supply generator costs d = np.matrix(\"\"\" 1.6154 2.3405 1.0868 1.5293", "= Variable(m) obj = Minimize(c.T*g) constraints = [A*p == vstack(-g,", "1 -1 0 0 0 0 0 0 ; 0", "0 0 0 0 0 0 0 1 1 0", "4.8005, 1.9246, 3.4274, 2.9439, 4.5652, 4.0484, 2.8259, 1.0740, 4.2856, 2.7788,", "g = Variable(k) p = Variable(m) obj = Minimize(c.T*g) constraints", "<= Pmax.T ) prob = Problem(obj, constraints) tic = time.time()", "the code below is not data for the problem #", "- np.diag(np.diag(Ad)) epsx = 0.05; epsy = 0.15; # text", "abs(p) <= Pmax.T, 0 <= g, g <= Gmax] prob", "time # data for power flow problem import numpy as", "-1 0 -1 0 ; 1 0 0 0 1", "0 0 0 0 0 ; 0 0 -1 -1", "== vstack(-g, d.T), abs(p) <= Pmax.T, 0 <= g, g", "0 1 1 -1 0 0 0 0 ; 0", "#print val pass #print g.value # N - 1 contingency", "0 1 0 0 0 0 -1 1 0 0", "0 0 0 0 0 0 0 ; 0 1", "range(k): #plot nodes pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12) pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for j", "lines) k = 4 # number of generators # transmission", "# plotting import matplotlib.pyplot as plt for i in range(12):", "0 0 0 0 0 0 -1 0 -1 0", "1 0 0 0 -1 0 1 0 0 1", "= np.matrix(\"3; 2; 4; 7\") # maximum generator power c", "0 1 ; 0 0 0 0 0 0 0", "4; 7\") # maximum generator power c = np.matrix(\"4; 8;", "= Minimize(c.T*g) constraints = [A*p == vstack(-g, d.T), abs(p) <=", "total number of nodes m = 18 # number of", "= 12 # total number of nodes m = 18", ") prob = Problem(obj, constraints) tic = time.time() val =", "epsx = 0.05; epsy = 0.15; # text placing offset", "Gmax, 0 <= g] for i in range(m): # N", "= [g <= Gmax, 0 <= g] for i in", "capacities = TIME = 0 Pmax = np.matrix(\"\"\" 4.8005, 1.9246,", "0 0 0 0 0 0 0 0 1 0", "2.5; 0 3 \"\"\"); # node adjacency matrix Ad =", "-1 0 ; 1 0 0 0 1 -1 0", "g] for i in range(m): # N -1 redundancy constraints.append(A*flows[i]", "of edges (transmission lines) k = 4 # number of", "coordinates XY = np.matrix(\"\"\" 1.5 5.2; 4.9 5; 6.9 3.5;", "# number of generators # transmission line capacities = TIME", "1 0 -1 0 1 -1 0 0 0 0", "nodes pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12) pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for j in range(k,n):", "0 0 0 0 ; 0 0 0 1 0", "0 0 0 0 ; 0 1 1 0 -1", "import copy import time # data for power flow problem", "each row is node x-y coordinates XY = np.matrix(\"\"\" 1.5", "from cvxpy import * import copy import time # data", "0 0 1 1 0 0 0 -1 0 1", "pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12) pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for j in range(k,n): pass", "edges for j in range(i): if Ad[i,j] == 1: pass", "np.matrix(\"\"\" 1.6154 2.3405 1.0868 1.5293 2.2197 1.0148 1.2083 1.3041 \"\"\")#", "3.6; 5.9 2.5; 3.9 3; 1.4 2.5; 0 3 \"\"\");", "0 1 -1 0 0 0 0 0 0 ;", "1.2083 1.3041 \"\"\")# network power demands # graph incidence matrix", ") constraints.append( abs(flows[i]) <= Pmax.T ) prob = Problem(obj, constraints)", "time.time() TIME += toc - tic ANSWERS.append(val) pass #print val", "5; 6.9 3.5; 1.9 3.5; 0.2 4.4; 
3.2 4.8; 5.9", "Variable(m) obj = Minimize(c.T*g) constraints = [A*p == vstack(-g, d.T),", "# data for power flow problem import numpy as np", "1 -1 0 0 0 0 ; 0 0 0", "node x-y coordinates XY = np.matrix(\"\"\" 1.5 5.2; 4.9 5;", "Problem(obj, constraints) tic = time.time() val = prob.solve() toc =", "0 -1 0 -1 0 ; 1 0 0 0", "\"\"\") g = Variable(k) p = Variable(m) obj = Minimize(c.T*g)", "-1 -1 0 0 0 0 0 0 -1 ;", "# it is used only to generate the network graph", "nodes m = 18 # number of edges (transmission lines)", "line capacities = TIME = 0 Pmax = np.matrix(\"\"\" 4.8005,", "0 0 0 0 0 ; 0 1 1 0", "4.0484, 2.8259, 1.0740, 4.2856, 2.7788, 3.4617, 4.1677, 4.6873, 3.9528, 1.7051,", "1 0 \"\"\") g = Variable(k) p = Variable(m) obj", "N - 1 contingency g = Variable(k) flows = []", "0 0 0 0 0 0 1 0 1 0", "-1 0 0 0 0 ; 0 0 0 0", "the network graph # x-y coordinates # each row is", "2.2197 1.0148 1.2083 1.3041 \"\"\")# network power demands # graph", "np n = 12 # total number of nodes m", "g, g <= Gmax] prob = Problem(obj, constraints) tic =", "2; 4; 7\") # maximum generator power c = np.matrix(\"4;", "Pmax = np.matrix(\"\"\" 4.8005, 1.9246, 3.4274, 2.9439, 4.5652, 4.0484, 2.8259,", "for power flow problem import numpy as np n =", "placing offset # plotting import matplotlib.pyplot as plt for i", "12 # total number of nodes m = 18 #", "3.5; 1.9 3.5; 0.2 4.4; 3.2 4.8; 5.9 4.5; 3.9", "1 0 0 ; 0 0 0 0 0 1", "0 0 0 ; 0 1 1 0 -1 0", "constraints = [A*p == vstack(-g, d.T), abs(p) <= Pmax.T, 0", "0 0 0 -1 -1 0 0 0 0 0", "# each row is node x-y coordinates XY = np.matrix(\"\"\"", "0 ; 1 0 0 0 1 -1 0 0", "-1 -1 0 0 0 0 0 0 0 0", "flows[i][i] == 0 ) constraints.append( abs(flows[i]) <= Pmax.T ) prob", "toc = time.time() TIME += toc - tic ANSWERS.append(val) pass", "5.9 2.5; 3.9 3; 1.4 2.5; 0 3 \"\"\"); #", "0 0 0 -1 0 -1 0 ; 1 0", "0 0 0 1 0 1 0 0 ; 0", "# N -1 redundancy constraints.append(A*flows[i] == vstack(-g, d.T)) constraints.append( flows[i][i]", "pass #plt.axis('off') pass #plt.savefig('pwr_net.eps') if __name__ == '__main__': pass #plt.show()", "1 0 1 0 0 ; 0 0 0 0", "0 0 0 1 0 0 0 0 0 0", "g = Variable(k) flows = [] obj = Minimize(c.T*g) for", "0 0 0 0 0 0 0 0 0 0", "0 0 0 0 0 0 ; 0 1 1", "pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-') for j in range(k):", "matrix A = np.matrix(\"\"\" -1 -1 0 0 0 0", "; 0 1 1 0 -1 0 1 -1 0", "0 -1 0 1 -1 0 0 0 0 0", "for i in range(m): flows.append(Variable(m)) constraints = [g <= Gmax,", "j in range(k): #plot nodes pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12) pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1))", "2.8259, 1.0740, 4.2856, 2.7788, 3.4617, 4.1677, 4.6873, 3.9528, 1.7051, 2.6228,", "1.5293 2.2197 1.0148 1.2083 1.3041 \"\"\")# network power demands #", "vstack(-g, d.T), abs(p) <= Pmax.T, 0 <= g, g <=", "constraints) tic = time.time() val = prob.solve() toc = time.time()", "[A*p == vstack(-g, d.T), abs(p) <= Pmax.T, 0 <= g,", "0 1 1 0 -1 0 1 -1 0 0", "3; 1.4 2.5; 0 3 \"\"\"); # node adjacency matrix", "0 0 ; 0 0 0 1 0 0 0", "Pmax.T, 0 <= g, g <= Gmax] prob = Problem(obj,", "0 0 0 1 -1 0 0 0 0 0", "np from cvxpy import * import copy import time #", "0 0 0 0 0 -1 0 -1 0 ;", "in range(k,n): pass #plt.plot(XY[j,0],XY[j,1],'ko') pass #plt.axis('off') pass #plt.savefig('pwr_net.eps') if __name__", "0 0 0 0 0 0 0 0 -1 -1", "0 0 0 0 0 1 0 1 0 0", "4.6676, \"\"\") Gmax = np.matrix(\"3; 2; 4; 7\") # maximum", "<= g] for i in range(m): # N -1 redundancy", "= -A*A.T Ad = Ad - np.diag(np.diag(Ad)) epsx = 0.05;", "A = np.matrix(\"\"\" -1 -1 0 
0 0 0 0", "np.matrix(\"3; 2; 4; 7\") # maximum generator power c =", "# maximum generator power c = np.matrix(\"4; 8; 5; 3\")", "-1 0 0 0 0 0 0 ; 0 0", "; 0 0 -1 -1 0 0 0 0 0", "import * import copy import time # data for power", "; 0 0 0 0 0 0 0 1 1", "N -1 redundancy constraints.append(A*flows[i] == vstack(-g, d.T)) constraints.append( flows[i][i] ==", "0 0 0 0 0 0 1 -1 0 0", "3.4274, 2.9439, 4.5652, 4.0484, 2.8259, 1.0740, 4.2856, 2.7788, 3.4617, 4.1677,", "0 -1 0 1 0 0 1 ; 0 0", "number of edges (transmission lines) k = 4 # number", "only to generate the network graph # x-y coordinates #", "generators # transmission line capacities = TIME = 0 Pmax", "contingency g = Variable(k) flows = [] obj = Minimize(c.T*g)", "d = np.matrix(\"\"\" 1.6154 2.3405 1.0868 1.5293 2.2197 1.0148 1.2083", "-A*A.T Ad = Ad - np.diag(np.diag(Ad)) epsx = 0.05; epsy", "3.9528, 1.7051, 2.6228, 4.7419, 4.6676, \"\"\") Gmax = np.matrix(\"3; 2;", "tic ANSWERS.append(val) pass #print val pass #print g.value # the", "0 3 \"\"\"); # node adjacency matrix Ad = -A*A.T", "0 0 0 0 0 ; 0 0 0 0", "0 0 0 0 0 1 1 0 0 0", "0 ; 0 0 -1 -1 0 0 0 0", "numpy as np n = 12 # total number of", "abs(flows[i]) <= Pmax.T ) prob = Problem(obj, constraints) tic =", "0 0 0 0 0 -1 0 0 0 0", "1.4 2.5; 0 3 \"\"\"); # node adjacency matrix Ad", "redundancy constraints.append(A*flows[i] == vstack(-g, d.T)) constraints.append( flows[i][i] == 0 )", "as plt for i in range(12): #plot edges for j", "0 0 1 -1 0 0 0 0 0 0", "1 0 0 0 0 -1 1 0 0 0", "problem # it is used only to generate the network", "; 1 0 0 0 1 -1 0 0 0", "j in range(k,n): pass #plt.plot(XY[j,0],XY[j,1],'ko') pass #plt.axis('off') pass #plt.savefig('pwr_net.eps') if", "for j in range(i): if Ad[i,j] == 1: pass #plt.plot((XY[i,0],", "#plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for j in range(k,n): pass #plt.plot(XY[j,0],XY[j,1],'ko') pass #plt.axis('off') pass", "0 0 0 0 0 0 -1 ; 0 0", "0 0 -1 0 -1 0 ; 1 0 0", "0 0 0 0 0 ; 0 0 0 1", "in range(k): #plot nodes pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12) pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for", "# text placing offset # plotting import matplotlib.pyplot as plt", "5.2; 4.9 5; 6.9 3.5; 1.9 3.5; 0.2 4.4; 3.2", "epsy = 0.15; # text placing offset # plotting import", "x-y coordinates # each row is node x-y coordinates XY", "0 0 0 0 0 0 0 0 ; 0", "0 0 0 0 -1 1 0 \"\"\") g =", "(transmission lines) k = 4 # number of generators #", "# N - 1 contingency g = Variable(k) flows =", "0 0 0 -1 0 0 0 0 0 0", "3.9 3.6; 5.9 2.5; 3.9 3; 1.4 2.5; 0 3", "1 1 0 -1 0 1 -1 0 0 0", "* import copy import time # data for power flow", "5; 3\") # supply generator costs d = np.matrix(\"\"\" 1.6154", "incidence matrix A = np.matrix(\"\"\" -1 -1 0 0 0", "0.2 4.4; 3.2 4.8; 5.9 4.5; 3.9 3.6; 5.9 2.5;", "= Minimize(c.T*g) for i in range(m): flows.append(Variable(m)) constraints = [g", "5.9 4.5; 3.9 3.6; 5.9 2.5; 3.9 3; 1.4 2.5;", "# the code below is not data for the problem", "network graph # x-y coordinates # each row is node", "6.9 3.5; 1.9 3.5; 0.2 4.4; 3.2 4.8; 5.9 4.5;", "demands # graph incidence matrix A = np.matrix(\"\"\" -1 -1", "1 0 0 0 0 0 0 0 0 ;", "== 0 ) constraints.append( abs(flows[i]) <= Pmax.T ) prob =", "; 0 0 0 0 0 0 0 0 0", "as np from cvxpy import * import copy import time", "= [A*p == vstack(-g, d.T), abs(p) <= Pmax.T, 0 <=", "k = 4 # number of generators # transmission line", "is used only to generate the network graph # x-y", "0 0 ; 0 0 -1 -1 0 0 0", 
"<reponame>jasondark/cvxpy<filename>cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py import numpy as np from cvxpy import * import", "#plt.plot(XY[j,0],XY[j,1],'rs', markersize=12) pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1)) for j in range(k,n): pass #plt.plot(XY[j,0],XY[j,1],'ko')", "0 0 0 0 0 1 1 -1 0 0", "0 0 0 0 0 0 0 0 1 1", "Variable(k) flows = [] obj = Minimize(c.T*g) for i in", "0 0 ; 0 0 0 0 0 1 0", "numpy as np from cvxpy import * import copy import", "0 0 0 -1 0 1 0 0 1 ;", "used only to generate the network graph # x-y coordinates", "power demands # graph incidence matrix A = np.matrix(\"\"\" -1", "1 1 -1 0 0 0 0 ; 0 0", "flows.append(Variable(m)) constraints = [g <= Gmax, 0 <= g] for", "-1 1 0 0 0 0 0 0 0 0", "number of nodes m = 18 # number of edges", "0 0 0 0 0 0 0 1 1 -1", "network power demands # graph incidence matrix A = np.matrix(\"\"\"", "matplotlib.pyplot as plt for i in range(12): #plot edges for", "= np.matrix(\"\"\" 4.8005, 1.9246, 3.4274, 2.9439, 4.5652, 4.0484, 2.8259, 1.0740,", "0.05; epsy = 0.15; # text placing offset # plotting", "Pmax.T ) prob = Problem(obj, constraints) tic = time.time() val", "0 0 0 0 0 0 0 0 0 -1", "1 ; 0 0 0 0 0 0 0 0", "0 0 0 0 0 0 -1 1 0 \"\"\")", "import numpy as np from cvxpy import * import copy", "generator costs d = np.matrix(\"\"\" 1.6154 2.3405 1.0868 1.5293 2.2197", "if Ad[i,j] == 1: pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-')", "prob = Problem(obj, constraints) tic = time.time() val = prob.solve()", "as np n = 12 # total number of nodes", "<= Gmax] prob = Problem(obj, constraints) tic = time.time() val", "range(k,n): pass #plt.plot(XY[j,0],XY[j,1],'ko') pass #plt.axis('off') pass #plt.savefig('pwr_net.eps') if __name__ ==", "0 0 0 1 1 -1 0 0 0 0", "0 0 0 0 0 0 0 1 0 1", "Minimize(c.T*g) constraints = [A*p == vstack(-g, d.T), abs(p) <= Pmax.T,", "4.2856, 2.7788, 3.4617, 4.1677, 4.6873, 3.9528, 1.7051, 2.6228, 4.7419, 4.6676,", "0 0 1 ; 0 0 0 0 0 0", "0 0 -1 1 0 \"\"\") g = Variable(k) p", "# supply generator costs d = np.matrix(\"\"\" 1.6154 2.3405 1.0868", "data for power flow problem import numpy as np n", "0 0 0 0 -1 0 -1 0 ; 1", "0 1 0 0 ; 0 0 0 0 0", "<= Gmax, 0 <= g] for i in range(m): #", "= 18 # number of edges (transmission lines) k =", "1 0 0 0 0 0 0 0 0 0", "# graph incidence matrix A = np.matrix(\"\"\" -1 -1 0", "generator power c = np.matrix(\"4; 8; 5; 3\") # supply", "1 0 0 0 1 -1 0 0 0 0", "np.diag(np.diag(Ad)) epsx = 0.05; epsy = 0.15; # text placing", "0 0 0 0 0 1 -1 0 0 0", "0 0 0 0 1 1 0 0 0 -1", "costs d = np.matrix(\"\"\" 1.6154 2.3405 1.0868 1.5293 2.2197 1.0148", "0 0 -1 0 0 0 0 0 0 0", "1: pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-') for j in", "Gmax] prob = Problem(obj, constraints) tic = time.time() val =", "c = np.matrix(\"4; 8; 5; 3\") # supply generator costs", "7\") # maximum generator power c = np.matrix(\"4; 8; 5;", "= np.matrix(\"\"\" 1.5 5.2; 4.9 5; 6.9 3.5; 1.9 3.5;", "range(12): #plot edges for j in range(i): if Ad[i,j] ==", "-1 ; 0 0 0 0 0 0 -1 0", "generate the network graph # x-y coordinates # each row", "0 0 0 0 1 0 0 0 0 0", "18 # number of edges (transmission lines) k = 4", "1.5 5.2; 4.9 5; 6.9 3.5; 1.9 3.5; 0.2 4.4;", "the problem # it is used only to generate the", "# number of edges (transmission lines) k = 4 #", "3 \"\"\"); # node adjacency matrix Ad = -A*A.T Ad", "4.7419, 4.6676, \"\"\") Gmax = np.matrix(\"3; 2; 4; 7\") #", "row is node x-y coordinates XY = np.matrix(\"\"\" 1.5 5.2;", "3\") # supply generator costs d = np.matrix(\"\"\" 
1.6154 2.3405", "-1 redundancy constraints.append(A*flows[i] == vstack(-g, d.T)) constraints.append( flows[i][i] == 0", "0 0 1 1 -1 0 0 0 0 ;", "0.15; # text placing offset # plotting import matplotlib.pyplot as", "1.0868 1.5293 2.2197 1.0148 1.2083 1.3041 \"\"\")# network power demands", "= prob.solve() toc = time.time() TIME += toc - tic", "# x-y coordinates # each row is node x-y coordinates", "; 0 0 0 1 0 0 0 0 -1", "val pass #print g.value # N - 1 contingency g", "0 <= g, g <= Gmax] prob = Problem(obj, constraints)", "graph # x-y coordinates # each row is node x-y", "# transmission line capacities = TIME = 0 Pmax =", "adjacency matrix Ad = -A*A.T Ad = Ad - np.diag(np.diag(Ad))", "2.3405 1.0868 1.5293 2.2197 1.0148 1.2083 1.3041 \"\"\")# network power", "0 0 0 0 0 1 0 0 0 0", "[g <= Gmax, 0 <= g] for i in range(m):", "Ad = Ad - np.diag(np.diag(Ad)) epsx = 0.05; epsy =", "2.9439, 4.5652, 4.0484, 2.8259, 1.0740, 4.2856, 2.7788, 3.4617, 4.1677, 4.6873,", "for j in range(k): #plot nodes pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12) pass", "time.time() val = prob.solve() toc = time.time() TIME += toc", "constraints.append( abs(flows[i]) <= Pmax.T ) prob = Problem(obj, constraints) tic", "0 0 0 0 0 0 0 1 -1 0", "0 0 ; 0 1 1 0 -1 0 1", "4.9 5; 6.9 3.5; 1.9 3.5; 0.2 4.4; 3.2 4.8;", "'k-') for j in range(k): #plot nodes pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12)", "- tic ANSWERS.append(val) pass #print val pass #print g.value #", "import matplotlib.pyplot as plt for i in range(12): #plot edges", "0 0 1 0 1 0 0 ; 0 0", "#print g.value # the code below is not data for", "= [] obj = Minimize(c.T*g) for i in range(m): flows.append(Variable(m))", "0 -1 -1 0 0 0 0 0 0 0", "of generators # transmission line capacities = TIME = 0", "g.value # N - 1 contingency g = Variable(k) flows", "0 0 0 0 0 0 1 1 -1 0", "cvxpy import * import copy import time # data for", "tic = time.time() val = prob.solve() toc = time.time() TIME", "4.6873, 3.9528, 1.7051, 2.6228, 4.7419, 4.6676, \"\"\") Gmax = np.matrix(\"3;", "#plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-') for j in range(k): #plot", "0 0 0 0 -1 1 0 0 0 0", "plotting import matplotlib.pyplot as plt for i in range(12): #plot", "0 ) constraints.append( abs(flows[i]) <= Pmax.T ) prob = Problem(obj,", "= np.matrix(\"\"\" 1.6154 2.3405 1.0868 1.5293 2.2197 1.0148 1.2083 1.3041", "pass #print val pass #print g.value # N - 1", "ANSWERS.append(val) pass #print val pass #print g.value # the code", "3.4617, 4.1677, 4.6873, 3.9528, 1.7051, 2.6228, 4.7419, 4.6676, \"\"\") Gmax", "np.matrix(\"\"\" 4.8005, 1.9246, 3.4274, 2.9439, 4.5652, 4.0484, 2.8259, 1.0740, 4.2856,", "in range(m): # N -1 redundancy constraints.append(A*flows[i] == vstack(-g, d.T))", "obj = Minimize(c.T*g) for i in range(m): flows.append(Variable(m)) constraints =", "tic ANSWERS.append(val) pass #print val pass #print g.value # N", "\"\"\") Gmax = np.matrix(\"3; 2; 4; 7\") # maximum generator", "number of generators # transmission line capacities = TIME =", "0 0 0 ; 0 0 0 1 0 0", "3.5; 0.2 4.4; 3.2 4.8; 5.9 4.5; 3.9 3.6; 5.9", "power flow problem import numpy as np n = 12", "data for the problem # it is used only to", "range(m): flows.append(Variable(m)) constraints = [g <= Gmax, 0 <= g]", "= Ad - np.diag(np.diag(Ad)) epsx = 0.05; epsy = 0.15;", "p = Variable(m) obj = Minimize(c.T*g) constraints = [A*p ==", "0 0 0 0 0 0 -1 0 0 0", "-1 0 0 0 0 0 0 0 0 0", "0 0 1 0 0 0 0 -1 1 0", "0 0 0 0 1 1 -1 0 0 0", "0 0 1 0 0 0 0 0 0 0", "range(i): if Ad[i,j] == 1: pass #plt.plot((XY[i,0], 
XY[j,0]), (XY[i,1], XY[j,1]),", "= Variable(k) flows = [] obj = Minimize(c.T*g) for i", "constraints.append(A*flows[i] == vstack(-g, d.T)) constraints.append( flows[i][i] == 0 ) constraints.append(", "node adjacency matrix Ad = -A*A.T Ad = Ad -", "-1 0 1 -1 0 0 0 0 0 0", "0 0 -1 -1 0 0 0 0 0 0", "problem import numpy as np n = 12 # total", "0 1 0 1 0 0 ; 0 0 0", "0 0 -1 0 1 0 0 1 ; 0", "0 0 0 -1 ; 0 0 0 0 0", "d.T), abs(p) <= Pmax.T, 0 <= g, g <= Gmax]", "1 1 0 0 0 -1 0 1 0 0", "; 0 0 0 0 0 0 -1 0 0", "= 0.05; epsy = 0.15; # text placing offset #", "1.0740, 4.2856, 2.7788, 3.4617, 4.1677, 4.6873, 3.9528, 1.7051, 2.6228, 4.7419,", "j in range(i): if Ad[i,j] == 1: pass #plt.plot((XY[i,0], XY[j,0]),", "power c = np.matrix(\"4; 8; 5; 3\") # supply generator", "1.9 3.5; 0.2 4.4; 3.2 4.8; 5.9 4.5; 3.9 3.6;", "(XY[i,1], XY[j,1]), 'k-') for j in range(k): #plot nodes pass", "1 -1 0 0 0 0 0 0 0 0", "0 0 0 0 0 0 0 0 1 -1", "= Problem(obj, constraints) tic = time.time() val = prob.solve() toc", "XY[j,0]), (XY[i,1], XY[j,1]), 'k-') for j in range(k): #plot nodes", "for the problem # it is used only to generate", "1 0 0 1 ; 0 0 0 0 0", "1 contingency g = Variable(k) flows = [] obj =", "# total number of nodes m = 18 # number", "0 ; 0 0 0 1 0 0 0 0", "XY[j,1]), 'k-') for j in range(k): #plot nodes pass #plt.plot(XY[j,0],XY[j,1],'rs',", "= time.time() val = prob.solve() toc = time.time() TIME +=", "4 # number of generators # transmission line capacities =", "#print val pass #print g.value # the code below is", "4.5652, 4.0484, 2.8259, 1.0740, 4.2856, 2.7788, 3.4617, 4.1677, 4.6873, 3.9528,", "0 0 0 0 -1 0 0 0 0 0", "pass #print g.value # the code below is not data", "0 0 0 0 0 -1 1 0 \"\"\") g", "i in range(m): # N -1 redundancy constraints.append(A*flows[i] == vstack(-g,", "#plt.plot(XY[j,0],XY[j,1],'ko') pass #plt.axis('off') pass #plt.savefig('pwr_net.eps') if __name__ == '__main__': pass", "of nodes m = 18 # number of edges (transmission", "0 0 -1 1 0 0 0 0 0 0", "0 0 0 0 ; 0 0 -1 -1 0" ]
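The dispatch problems above are written against the old star-import CVXPY interface. As a point of comparison, a minimal sketch of the same minimum-cost network-flow dispatch LP in the current cvxpy (>= 1.0) namespace API is shown below; the 3-node, 3-line network and every number in it are illustrative assumptions, not the course data used by the script.

# Minimal sketch: min-cost dispatch with line limits in modern CVXPY syntax.
# The tiny 3-node / 3-line network below is made up for illustration only.
import numpy as np
import cvxpy as cp

# Incidence matrix: rows are nodes (node 0 holds the only generator),
# columns are lines; +1 = line leaves the node, -1 = line enters it.
A = np.array([[ 1.,  1.,  0.],
              [-1.,  0.,  1.],
              [ 0., -1., -1.]])
d = np.array([0.6, 0.9])            # demands at the two load nodes
Pmax = np.array([1.0, 1.0, 1.0])    # line capacities
Gmax, cost = 2.0, 4.0               # generator limit and unit cost

g = cp.Variable()                   # generation at node 0
p = cp.Variable(3)                  # line flows

constraints = [
    A @ p == cp.hstack([g, -d]),    # flow conservation at every node
    cp.abs(p) <= Pmax,              # line capacity limits
    0 <= g, g <= Gmax,
]
prob = cp.Problem(cp.Minimize(cost * g), constraints)
prob.solve()
print(prob.value, g.value, p.value)

In this toy instance the optimal generation simply equals total demand; the N-1 version in the script above repeats the same conservation and capacity constraints once per removed line, with that line's flow pinned to zero.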
[ "difference is 1, which is the difference between 2 and", "not node: return self.minDiff.append(node.val) L = travel(node.left) R = travel(node.right)", "node: return self.minDiff.append(node.val) L = travel(node.left) R = travel(node.right) travel(root)", "2 # # Output: # 1 # # Explanation: #", "2 and 1 (or between 2 and 3). # Definition", "root): \"\"\" :type root: TreeNode :rtype: int \"\"\" self.minDiff =", "minimum absolute difference between values of any two nodes. #", "= travel(node.left) R = travel(node.right) travel(root) self.minDiff = sorted(self.minDiff) return", "a binary tree node. # class TreeNode: # def __init__(self,", "self.left = None # self.right = None class Solution: def", "# 1 # \\ # 3 # / # 2", "def __init__(self, x): # self.val = x # self.left =", "TreeNode: # def __init__(self, x): # self.val = x #", "non-negative values, find the minimum absolute difference between values of", "3 # / # 2 # # Output: # 1", "for a binary tree node. # class TreeNode: # def", "absolute difference is 1, which is the difference between 2", "(or between 2 and 3). # Definition for a binary", "tree node. # class TreeNode: # def __init__(self, x): #", "is the difference between 2 and 1 (or between 2", "between values of any two nodes. # # Example: #", "2 and 3). # Definition for a binary tree node.", "class Solution: def getMinimumDifference(self, root): \"\"\" :type root: TreeNode :rtype:", "1 (or between 2 and 3). # Definition for a", "binary tree node. # class TreeNode: # def __init__(self, x):", "# def __init__(self, x): # self.val = x # self.left", "= None # self.right = None class Solution: def getMinimumDifference(self,", "self.minDiff = [] def travel(node): if not node: return self.minDiff.append(node.val)", "sorted(self.minDiff) return min(abs(a - b) for a, b in zip(self.minDiff,", "# # Output: # 1 # # Explanation: # The", "search tree with non-negative values, find the minimum absolute difference", "x): # self.val = x # self.left = None #", "binary search tree with non-negative values, find the minimum absolute", "1, which is the difference between 2 and 1 (or", "and 1 (or between 2 and 3). # Definition for", "# Given a binary search tree with non-negative values, find", "# Binary Search Tree # Given a binary search tree", "Definition for a binary tree node. # class TreeNode: #", "two nodes. # # Example: # # Input: # #", "# Explanation: # The minimum absolute difference is 1, which", "# # Input: # # 1 # \\ # 3", "the difference between 2 and 1 (or between 2 and", "# The minimum absolute difference is 1, which is the", "# Example: # # Input: # # 1 # \\", "# # 1 # \\ # 3 # / #", "# self.val = x # self.left = None # self.right", "self.val = x # self.left = None # self.right =", "\\ # 3 # / # 2 # # Output:", "the minimum absolute difference between values of any two nodes.", "# 1 # # Explanation: # The minimum absolute difference", "TreeNode :rtype: int \"\"\" self.minDiff = [] def travel(node): if", "# / # 2 # # Output: # 1 #", "between 2 and 3). 
# Definition for a binary tree", "None class Solution: def getMinimumDifference(self, root): \"\"\" :type root: TreeNode", "class TreeNode: # def __init__(self, x): # self.val = x", "= None class Solution: def getMinimumDifference(self, root): \"\"\" :type root:", "Solution: def getMinimumDifference(self, root): \"\"\" :type root: TreeNode :rtype: int", "with non-negative values, find the minimum absolute difference between values", "Given a binary search tree with non-negative values, find the", "Explanation: # The minimum absolute difference is 1, which is", "The minimum absolute difference is 1, which is the difference", "Search Tree # Given a binary search tree with non-negative", "difference between values of any two nodes. # # Example:", "nodes. # # Example: # # Input: # # 1", "# # Explanation: # The minimum absolute difference is 1,", "Input: # # 1 # \\ # 3 # /", "= sorted(self.minDiff) return min(abs(a - b) for a, b in", "3). # Definition for a binary tree node. # class", "difference between 2 and 1 (or between 2 and 3).", "of any two nodes. # # Example: # # Input:", "Example: # # Input: # # 1 # \\ #", "1 # \\ # 3 # / # 2 #", "x # self.left = None # self.right = None class", ":rtype: int \"\"\" self.minDiff = [] def travel(node): if not", "\"\"\" :type root: TreeNode :rtype: int \"\"\" self.minDiff = []", "between 2 and 1 (or between 2 and 3). #", "= [] def travel(node): if not node: return self.minDiff.append(node.val) L", "# class TreeNode: # def __init__(self, x): # self.val =", "def travel(node): if not node: return self.minDiff.append(node.val) L = travel(node.left)", "root: TreeNode :rtype: int \"\"\" self.minDiff = [] def travel(node):", "self.minDiff.append(node.val) L = travel(node.left) R = travel(node.right) travel(root) self.minDiff =", "# 3 # / # 2 # # Output: #", "= travel(node.right) travel(root) self.minDiff = sorted(self.minDiff) return min(abs(a - b)", "return min(abs(a - b) for a, b in zip(self.minDiff, self.minDiff[1:]))", "# Input: # # 1 # \\ # 3 #", "/ # 2 # # Output: # 1 # #", "any two nodes. # # Example: # # Input: #", "# \\ # 3 # / # 2 # #", "travel(root) self.minDiff = sorted(self.minDiff) return min(abs(a - b) for a,", "1 # # Explanation: # The minimum absolute difference is", "# self.right = None class Solution: def getMinimumDifference(self, root): \"\"\"", "which is the difference between 2 and 1 (or between", "if not node: return self.minDiff.append(node.val) L = travel(node.left) R =", "Tree # Given a binary search tree with non-negative values,", "__init__(self, x): # self.val = x # self.left = None", "# # Example: # # Input: # # 1 #", "None # self.right = None class Solution: def getMinimumDifference(self, root):", "# self.left = None # self.right = None class Solution:", "getMinimumDifference(self, root): \"\"\" :type root: TreeNode :rtype: int \"\"\" self.minDiff", "a binary search tree with non-negative values, find the minimum", "is 1, which is the difference between 2 and 1", "travel(node.left) R = travel(node.right) travel(root) self.minDiff = sorted(self.minDiff) return min(abs(a", "# 2 # # Output: # 1 # # Explanation:", "int \"\"\" self.minDiff = [] def travel(node): if not node:", "values of any two nodes. # # Example: # #", "Binary Search Tree # Given a binary search tree with", "and 3). # Definition for a binary tree node. 
#", "\"\"\" self.minDiff = [] def travel(node): if not node: return", "return self.minDiff.append(node.val) L = travel(node.left) R = travel(node.right) travel(root) self.minDiff", "tree with non-negative values, find the minimum absolute difference between", "node. # class TreeNode: # def __init__(self, x): # self.val", "values, find the minimum absolute difference between values of any", "Output: # 1 # # Explanation: # The minimum absolute", "find the minimum absolute difference between values of any two", "# Output: # 1 # # Explanation: # The minimum", "minimum absolute difference is 1, which is the difference between", "absolute difference between values of any two nodes. # #", "travel(node): if not node: return self.minDiff.append(node.val) L = travel(node.left) R", "= x # self.left = None # self.right = None", "R = travel(node.right) travel(root) self.minDiff = sorted(self.minDiff) return min(abs(a -", "# Definition for a binary tree node. # class TreeNode:", "self.minDiff = sorted(self.minDiff) return min(abs(a - b) for a, b", "travel(node.right) travel(root) self.minDiff = sorted(self.minDiff) return min(abs(a - b) for", "def getMinimumDifference(self, root): \"\"\" :type root: TreeNode :rtype: int \"\"\"", "self.right = None class Solution: def getMinimumDifference(self, root): \"\"\" :type", "[] def travel(node): if not node: return self.minDiff.append(node.val) L =", ":type root: TreeNode :rtype: int \"\"\" self.minDiff = [] def", "L = travel(node.left) R = travel(node.right) travel(root) self.minDiff = sorted(self.minDiff)" ]
[ "return code def lastupdate(self, date): return datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\") def since(self,", "from company.choices import fr as choices from mighty.errors import BackendError", "message = None since_format = None iso_format = '%Y-%m-%dT%H:%M:%S.%f%z' def", "None since_format = None iso_format = '%Y-%m-%dT%H:%M:%S.%f%z' def in_error(self, message):", "get_companies(self, companies, response_code): raise NotImplementedError(\"Subclasses should implement get_companies()\") def get_company_by_siren(self,", "datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\") def get_companies(self, companies, response_code): raise NotImplementedError(\"Subclasses should implement", "dict(choices.SLICE_EFFECTIVE) class SearchBackend: message = None since_format = None iso_format", "in_error(self, message): self.message = message def backend_error(self, msg): raise BackendError(msg)", "get_legalform_str(self, code): try: code = int(code) return CHOICES_LEGALFORM[code] except Exception:", "logging.getLogger(__name__) CHOICES_APE = dict(choices.APE) CHOICES_LEGALFORM = dict(choices.LEGALFORM) CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE)", "code = int(code) return CHOICES_LEGALFORM[code] except Exception: pass return code", "except Exception: pass return code def get_legalform_str(self, code): try: code", "def get_legalform_str(self, code): try: code = int(code) return CHOICES_LEGALFORM[code] except", "date): return datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\") def since(self, date): logger.warning(date) return datetime.datetime.strptime(date,", "NotImplementedError(\"Subclasses should implement get_companies()\") def get_company_by_siren(self, siren): raise NotImplementedError(\"Subclasses should", "CHOICES_LEGALFORM = dict(choices.LEGALFORM) CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE) class SearchBackend: message =", "if str(response_code)[0] == '4': self.in_error(companies[0]['message']) elif str(response_code)[0] == '5': self.in_error('error", "'4': self.in_error(companies[0]['message']) elif str(response_code)[0] == '5': self.in_error('error server') return companies", "should implement get_company_by_fulltext()\") def get_active_companies(self, fulltext): raise NotImplementedError(\"Subclasses should implement", "implement get_company_by_siren()\") def get_company_by_fulltext(self, fulltext): raise NotImplementedError(\"Subclasses should implement get_company_by_fulltext()\")", "return datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\") def since(self, date): logger.warning(date) return datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\")", "def get_company_by_fulltext(self, fulltext): raise NotImplementedError(\"Subclasses should implement get_company_by_fulltext()\") def get_active_companies(self,", "def since(self, date): logger.warning(date) return datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\") def get_companies(self, companies,", "import fr as choices from mighty.errors import BackendError import datetime,", "= int(code) return CHOICES_LEGALFORM[code] except Exception: pass return code def", "class SearchBackend: message = None since_format = None iso_format =", "CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE) class SearchBackend: message = None since_format =", "SearchBackend: message = None since_format = None iso_format = '%Y-%m-%dT%H:%M:%S.%f%z'", "since_format = None iso_format = '%Y-%m-%dT%H:%M:%S.%f%z' def in_error(self, message): 
self.message", "= None iso_format = '%Y-%m-%dT%H:%M:%S.%f%z' def in_error(self, message): self.message =", "= dict(choices.SLICE_EFFECTIVE) class SearchBackend: message = None since_format = None", "== '5': self.in_error('error server') return companies def get_ape_str(self, code): try:", "as choices from mighty.errors import BackendError import datetime, logging logger", "get_companies()\") def get_company_by_siren(self, siren): raise NotImplementedError(\"Subclasses should implement get_company_by_siren()\") def", "mighty.errors import BackendError import datetime, logging logger = logging.getLogger(__name__) CHOICES_APE", "BackendError import datetime, logging logger = logging.getLogger(__name__) CHOICES_APE = dict(choices.APE)", "None iso_format = '%Y-%m-%dT%H:%M:%S.%f%z' def in_error(self, message): self.message = message", "return CHOICES_APE[code] except Exception: pass return code def get_legalform_str(self, code):", "pass return code def get_legalform_str(self, code): try: code = int(code)", "response_code): if str(response_code)[0] == '4': self.in_error(companies[0]['message']) elif str(response_code)[0] == '5':", "pass return code def lastupdate(self, date): return datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\") def", "= dict(choices.APE) CHOICES_LEGALFORM = dict(choices.LEGALFORM) CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE) class SearchBackend:", "companies(self, companies, response_code): if str(response_code)[0] == '4': self.in_error(companies[0]['message']) elif str(response_code)[0]", "implement get_company_by_fulltext()\") def get_active_companies(self, fulltext): raise NotImplementedError(\"Subclasses should implement get_active_companies()\")", "response_code): raise NotImplementedError(\"Subclasses should implement get_companies()\") def get_company_by_siren(self, siren): raise", "= '%Y-%m-%dT%H:%M:%S.%f%z' def in_error(self, message): self.message = message def backend_error(self,", "code): try: return CHOICES_SLICE[code] except Exception: pass return code def", "= message def backend_error(self, msg): raise BackendError(msg) def companies(self, companies,", "Exception: pass return code def get_slice_str(self, code): try: return CHOICES_SLICE[code]", "CHOICES_SLICE[code] except Exception: pass return code def lastupdate(self, date): return", "except Exception: pass return code def lastupdate(self, date): return datetime.datetime.strptime(date,", "from mighty.errors import BackendError import datetime, logging logger = logging.getLogger(__name__)", "def in_error(self, message): self.message = message def backend_error(self, msg): raise", "datetime, logging logger = logging.getLogger(__name__) CHOICES_APE = dict(choices.APE) CHOICES_LEGALFORM =", "message): self.message = message def backend_error(self, msg): raise BackendError(msg) def", "iso_format = '%Y-%m-%dT%H:%M:%S.%f%z' def in_error(self, message): self.message = message def", "try: code = int(code) return CHOICES_LEGALFORM[code] except Exception: pass return", "get_company_by_fulltext(self, fulltext): raise NotImplementedError(\"Subclasses should implement get_company_by_fulltext()\") def get_active_companies(self, fulltext):", "raise NotImplementedError(\"Subclasses should implement get_company_by_siren()\") def get_company_by_fulltext(self, fulltext): raise NotImplementedError(\"Subclasses", "company.choices import fr as choices from mighty.errors import BackendError import", "dict(choices.LEGALFORM) CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE) class SearchBackend: message = None 
since_format", "backend_error(self, msg): raise BackendError(msg) def companies(self, companies, response_code): if str(response_code)[0]", "str(response_code)[0] == '4': self.in_error(companies[0]['message']) elif str(response_code)[0] == '5': self.in_error('error server')", "'5': self.in_error('error server') return companies def get_ape_str(self, code): try: return", "CHOICES_APE[code] except Exception: pass return code def get_legalform_str(self, code): try:", "return code def get_slice_str(self, code): try: return CHOICES_SLICE[code] except Exception:", "def lastupdate(self, date): return datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\") def since(self, date): logger.warning(date)", "import datetime, logging logger = logging.getLogger(__name__) CHOICES_APE = dict(choices.APE) CHOICES_LEGALFORM", "code def get_legalform_str(self, code): try: code = int(code) return CHOICES_LEGALFORM[code]", "'%Y-%m-%dT%H:%M:%S.%f%z' def in_error(self, message): self.message = message def backend_error(self, msg):", "fulltext): raise NotImplementedError(\"Subclasses should implement get_company_by_fulltext()\") def get_active_companies(self, fulltext): raise", "def backend_error(self, msg): raise BackendError(msg) def companies(self, companies, response_code): if", "companies def get_ape_str(self, code): try: return CHOICES_APE[code] except Exception: pass", "raise NotImplementedError(\"Subclasses should implement get_company_by_fulltext()\") def get_active_companies(self, fulltext): raise NotImplementedError(\"Subclasses", "<gh_stars>0 from company.choices import fr as choices from mighty.errors import", "BackendError(msg) def companies(self, companies, response_code): if str(response_code)[0] == '4': self.in_error(companies[0]['message'])", "= dict(choices.LEGALFORM) CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE) class SearchBackend: message = None", "dict(choices.APE) CHOICES_LEGALFORM = dict(choices.LEGALFORM) CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE) class SearchBackend: message", "get_company_by_siren()\") def get_company_by_fulltext(self, fulltext): raise NotImplementedError(\"Subclasses should implement get_company_by_fulltext()\") def", "NotImplementedError(\"Subclasses should implement get_company_by_siren()\") def get_company_by_fulltext(self, fulltext): raise NotImplementedError(\"Subclasses should", "since(self, date): logger.warning(date) return datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\") def get_companies(self, companies, response_code):", "date): logger.warning(date) return datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\") def get_companies(self, companies, response_code): raise", "def companies(self, companies, response_code): if str(response_code)[0] == '4': self.in_error(companies[0]['message']) elif", "siren): raise NotImplementedError(\"Subclasses should implement get_company_by_siren()\") def get_company_by_fulltext(self, fulltext): raise", "def get_company_by_siren(self, siren): raise NotImplementedError(\"Subclasses should implement get_company_by_siren()\") def get_company_by_fulltext(self,", "lastupdate(self, date): return datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\") def since(self, date): logger.warning(date) return", "elif str(response_code)[0] == '5': self.in_error('error server') return companies def get_ape_str(self,", "raise BackendError(msg) def companies(self, companies, response_code): if str(response_code)[0] == '4':", "pass return code def get_slice_str(self, 
code): try: return CHOICES_SLICE[code] except", "get_ape_str(self, code): try: return CHOICES_APE[code] except Exception: pass return code", "def get_ape_str(self, code): try: return CHOICES_APE[code] except Exception: pass return", "Exception: pass return code def lastupdate(self, date): return datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\")", "return CHOICES_LEGALFORM[code] except Exception: pass return code def get_slice_str(self, code):", "self.in_error(companies[0]['message']) elif str(response_code)[0] == '5': self.in_error('error server') return companies def", "get_slice_str(self, code): try: return CHOICES_SLICE[code] except Exception: pass return code", "self.message = message def backend_error(self, msg): raise BackendError(msg) def companies(self,", "self.iso_format).strftime(\"%Y-%m-%d\") def since(self, date): logger.warning(date) return datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\") def get_companies(self,", "NotImplementedError(\"Subclasses should implement get_company_by_fulltext()\") def get_active_companies(self, fulltext): raise NotImplementedError(\"Subclasses should", "logger.warning(date) return datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\") def get_companies(self, companies, response_code): raise NotImplementedError(\"Subclasses", "try: return CHOICES_SLICE[code] except Exception: pass return code def lastupdate(self,", "code def get_slice_str(self, code): try: return CHOICES_SLICE[code] except Exception: pass", "return datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\") def get_companies(self, companies, response_code): raise NotImplementedError(\"Subclasses should", "msg): raise BackendError(msg) def companies(self, companies, response_code): if str(response_code)[0] ==", "def get_companies(self, companies, response_code): raise NotImplementedError(\"Subclasses should implement get_companies()\") def", "def get_slice_str(self, code): try: return CHOICES_SLICE[code] except Exception: pass return", "logging logger = logging.getLogger(__name__) CHOICES_APE = dict(choices.APE) CHOICES_LEGALFORM = dict(choices.LEGALFORM)", "int(code) return CHOICES_LEGALFORM[code] except Exception: pass return code def get_slice_str(self,", "code): try: return CHOICES_APE[code] except Exception: pass return code def", "self.since_format).strftime(\"%Y-%m-%d\") def get_companies(self, companies, response_code): raise NotImplementedError(\"Subclasses should implement get_companies()\")", "CHOICES_APE = dict(choices.APE) CHOICES_LEGALFORM = dict(choices.LEGALFORM) CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE) class", "Exception: pass return code def get_legalform_str(self, code): try: code =", "self.in_error('error server') return companies def get_ape_str(self, code): try: return CHOICES_APE[code]", "message def backend_error(self, msg): raise BackendError(msg) def companies(self, companies, response_code):", "return code def get_legalform_str(self, code): try: code = int(code) return", "code): try: code = int(code) return CHOICES_LEGALFORM[code] except Exception: pass", "return CHOICES_SLICE[code] except Exception: pass return code def lastupdate(self, date):", "CHOICES_LEGALFORM[code] except Exception: pass return code def get_slice_str(self, code): try:", "server') return companies def get_ape_str(self, code): try: return CHOICES_APE[code] except", "should implement get_companies()\") def get_company_by_siren(self, siren): raise NotImplementedError(\"Subclasses should implement", 
"should implement get_company_by_siren()\") def get_company_by_fulltext(self, fulltext): raise NotImplementedError(\"Subclasses should implement", "fr as choices from mighty.errors import BackendError import datetime, logging", "get_company_by_siren(self, siren): raise NotImplementedError(\"Subclasses should implement get_company_by_siren()\") def get_company_by_fulltext(self, fulltext):", "str(response_code)[0] == '5': self.in_error('error server') return companies def get_ape_str(self, code):", "logger = logging.getLogger(__name__) CHOICES_APE = dict(choices.APE) CHOICES_LEGALFORM = dict(choices.LEGALFORM) CHOICES_SLICE", "datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\") def since(self, date): logger.warning(date) return datetime.datetime.strptime(date, self.since_format).strftime(\"%Y-%m-%d\") def", "import BackendError import datetime, logging logger = logging.getLogger(__name__) CHOICES_APE =", "companies, response_code): if str(response_code)[0] == '4': self.in_error(companies[0]['message']) elif str(response_code)[0] ==", "try: return CHOICES_APE[code] except Exception: pass return code def get_legalform_str(self,", "choices from mighty.errors import BackendError import datetime, logging logger =", "= logging.getLogger(__name__) CHOICES_APE = dict(choices.APE) CHOICES_LEGALFORM = dict(choices.LEGALFORM) CHOICES_SLICE =", "== '4': self.in_error(companies[0]['message']) elif str(response_code)[0] == '5': self.in_error('error server') return", "companies, response_code): raise NotImplementedError(\"Subclasses should implement get_companies()\") def get_company_by_siren(self, siren):", "implement get_companies()\") def get_company_by_siren(self, siren): raise NotImplementedError(\"Subclasses should implement get_company_by_siren()\")", "raise NotImplementedError(\"Subclasses should implement get_companies()\") def get_company_by_siren(self, siren): raise NotImplementedError(\"Subclasses", "code def lastupdate(self, date): return datetime.datetime.strptime(date, self.iso_format).strftime(\"%Y-%m-%d\") def since(self, date):", "except Exception: pass return code def get_slice_str(self, code): try: return", "return companies def get_ape_str(self, code): try: return CHOICES_APE[code] except Exception:", "= None since_format = None iso_format = '%Y-%m-%dT%H:%M:%S.%f%z' def in_error(self," ]
[ "= declarative_base() def get_db(): db = SessionLocal() try: yield db", "engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker import", "sqlalchemy.orm import sessionmaker import os SQLALCHEMY_DATABASE_URL = os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\") engine", "bind=engine) Base = declarative_base() def get_db(): db = SessionLocal() try:", "sessionmaker import os SQLALCHEMY_DATABASE_URL = os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\") engine = create_engine(SQLALCHEMY_DATABASE_URL)", "= create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) Base = declarative_base()", "os SQLALCHEMY_DATABASE_URL = os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\") engine = create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal =", "os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\") engine = create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)", "sqlalchemy import create_engine, engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm", "import create_engine, engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import", "def get_db(): db = SessionLocal() try: yield db finally: db.close()", "sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker import os SQLALCHEMY_DATABASE_URL", "from sqlalchemy import create_engine, engine from sqlalchemy.ext.declarative import declarative_base from", "SQLALCHEMY_DATABASE_URL = os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\") engine = create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal = sessionmaker(autocommit=False,", "<reponame>luisornelasch/melp from sqlalchemy import create_engine, engine from sqlalchemy.ext.declarative import declarative_base", "SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) Base = declarative_base() def get_db():", "autoflush=False, bind=engine) Base = declarative_base() def get_db(): db = SessionLocal()", "declarative_base() def get_db(): db = SessionLocal() try: yield db finally:", "Base = declarative_base() def get_db(): db = SessionLocal() try: yield", "from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker import os", "= sessionmaker(autocommit=False, autoflush=False, bind=engine) Base = declarative_base() def get_db(): db", "import declarative_base from sqlalchemy.orm import sessionmaker import os SQLALCHEMY_DATABASE_URL =", "from sqlalchemy.orm import sessionmaker import os SQLALCHEMY_DATABASE_URL = os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\")", "create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) Base = declarative_base() def", "\"postgresql+psycopg2://\") engine = create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) Base", "declarative_base from sqlalchemy.orm import sessionmaker import os SQLALCHEMY_DATABASE_URL = os.getenv(\"DATABASE_URL\").replace(\"postgres://\",", "sessionmaker(autocommit=False, autoflush=False, bind=engine) Base = declarative_base() def get_db(): db =", "= os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\") engine = 
create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal = sessionmaker(autocommit=False, autoflush=False,", "import os SQLALCHEMY_DATABASE_URL = os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\") engine = create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal", "engine = create_engine(SQLALCHEMY_DATABASE_URL) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) Base =", "create_engine, engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker", "import sessionmaker import os SQLALCHEMY_DATABASE_URL = os.getenv(\"DATABASE_URL\").replace(\"postgres://\", \"postgresql+psycopg2://\") engine =" ]
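get_db is the usual session-per-request generator. Assuming the module is consumed by a FastAPI application with get_db in scope (the route and the "restaurants" table below are illustrative, not taken from the repository), it is wired in with Depends so each request gets its own session and the session is always closed.

# Illustrative FastAPI usage of get_db; only the dependency pattern itself
# is standard, the route and table name are assumptions.
from fastapi import Depends, FastAPI
from sqlalchemy import text
from sqlalchemy.orm import Session

app = FastAPI()

@app.get("/restaurants/count")
def count_restaurants(db: Session = Depends(get_db)):
    count = db.execute(text("SELECT count(*) FROM restaurants")).scalar()
    return {"count": count}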
[ "get_bucket_instance_info(self, bucket_name, bucket_id = None): if not bucket_id: ep =", "return config str_config_opts = [ 'user_id', 'access_key', 'secret_key', 'host', 'ceph_conf',", "validate=True): return self.conn.get_bucket(name, validate=validate) class RGWRESTAdmin: def __init__(self, connection): self.conn", "in d: return pickle.loads(str(d['__pickle'])) return d class RPlacementRule: def __init__(self,", "self.rgw_rest_admin.get_zone_params() self.placement_targets = {} for e in self.zone_params.placement_pools: self.placement_targets[e.key] =", "(list, dict, str, unicode, int, float, bool, type(None))): return JSONEncoder.default(self,", "try: cur[name][var] = cfg.getint(section, var) except ConfigParser.NoOptionError: pass for var", "'bucket':bucket_name}) def get_obj_layout(self, key): path = '/' + key.bucket.name +", "= self.config.rados.ceph_conf except: raise RuntimeError( 'ceph_conf is missing under the", "access_key, aws_secret_access_key = secret_key, host=host, port=port, is_secure=is_secure, calling_format = boto.s3.connection.OrdinaryCallingFormat(),", "self.conn.create_bucket(name) def get_bucket(self, name, validate=True): return self.conn.get_bucket(name, validate=validate) class RGWRESTAdmin:", "suffix): return self.bucket_prefix + '-' + suffix def register_test(self, t):", "r[0] if (len(r) == 2): self.storage_class=r[1] else: self.storage_class = 'STANDARD'", "var) except ConfigParser.NoOptionError: pass print json.dumps(self.config) rgw_conf = self.config.rgw try:", "= rgw_conf.bucket_prefix except: self.bucket_prefix = 'ragweed' conn = bunch.Bunch() for", "= os.environ['RAGWEED_CONF'] except KeyError: raise RuntimeError( 'To run tests, point", "def create_bucket(self): bid = len(self.r_buckets) + 1 bucket_name = suite.get_bucket_name(self._name", "test.from_json(s) def is_preparing(self): return self.do_preparing def is_checking(self): return self.do_check class", "return obj_layout.manifest.tail_bucket.pool except: pass pr = RPlacementRule(placement_rule) return self.placement_target.get_data_pool(pr) class", "= {} for e in self.zone_params.placement_pools: self.placement_targets[e.key] = e.val print", "= {} for x in dir(self): if x.startswith('r_'): attrs[x] =", "RuntimeError( 'ceph_conf is missing under the [rados] section in '", "k = Key(self.config_bucket) k.key = 'tests/' + test._name s =", "[ 'is_secure', ] def dict_find(d, k): if d.has_key(k): return d[k]", "is_secure=is_secure, calling_format = boto.s3.connection.OrdinaryCallingFormat(), ) def create_bucket(self, name): return self.conn.create_bucket(name)", "for (k, u) in self.config.user.iteritems(): conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host,", "rtest_decode_json(d): if '__pickle' in d: return pickle.loads(str(d['__pickle'])) return d class", "RGWRESTAdmin(self.conn.system) self.zone_params = self.rgw_rest_admin.get_zone_params() self.placement_targets = {} for e in", "bunch.Bunch() self.config[section_type][name] = bunch.Bunch() cur = self.config[section_type] except ValueError: section_type", "class RZone: def __init__(self, conn): self.conn = conn self.rgw_rest_admin =", "pass def prepare(self): pass def check(self): pass def to_json(self): attrs", "path = os.environ['RAGWEED_CONF'] except KeyError: raise RuntimeError( 'To run tests,", "ConfigParser.NoOptionError: pass for var in bool_config_opts: try: cur[name][var] = cfg.getboolean(section,", "explicit_pool = self.bucket_info.bucket.pool except: # new style explicit pool explicit_pool", "def refresh_rbucket(self, 
rbucket): rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name) rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name) class", "!= '': return explicit_pool return self.placement_target.get_data_pool(self.placement_rule) def get_tail_pool(self, obj_layout): try:", "'': print 'zone_params=', self.zone_params plid = self.zone_params.default_placement try: return RPlacementTarget(plid,", "= bucket_name.replace(\"_\", \"-\") rb = suite.zone.create_bucket(bucket_name) self.r_buckets.append(rb) return rb def", "'': return explicit_pool return self.placement_target.get_data_pool(self.placement_rule) def get_tail_pool(self, obj_layout): try: placement_rule", "= RZone(conn) self.suite = RSuite('ragweed', self.bucket_prefix, self.zone, os.environ['RAGWEED_STAGES']) try: self.ceph_conf", "var in bool_config_opts: try: cur[name][var] = cfg.getboolean(section, var) except ConfigParser.NoOptionError:", "def check(self): pass def to_json(self): attrs = {} for x", "def register_test(self, t): self.rtests.append(t) def write_test_data(self, test): k = Key(self.config_bucket)", "s test.from_json(s) def is_preparing(self): return self.do_preparing def is_checking(self): return self.do_check", "= bunch.Bunch() cur = self.config[section_type] except ValueError: section_type = ''", "= Key(self.config_bucket) k.key = 'tests/' + test._name k.set_contents_from_string(test.to_json()) def read_test_data(self,", "def __init__(self, zone, bucket, bucket_info): self.zone = zone self.bucket =", "in self.config.user.iteritems(): conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf,", "return self.storage_classes.get(placement_rule.storage_class).data_pool class RZone: def __init__(self, conn): self.conn = conn", "'zone_params:', self.zone_params def get_placement_target(self, placement_id): plid = placement_id if placement_id", "'user_id', 'access_key', 'secret_key', 'host', 'ceph_conf', 'bucket_prefix', ] int_config_opts = [", "cur[name] = bunch.Bunch() for var in str_config_opts: try: cur[name][var] =", "if key.version_id is not None: params['versionId'] = key.version_id print params", "def save(self): suite.write_test_data(self) def load(self): suite.read_test_data(self) for rb in self.r_buckets:", "explicit_pool != '': return explicit_pool return self.placement_target.get_data_pool(self.placement_rule) def get_tail_pool(self, obj_layout):", "a config file.', ) with file(path) as f: cfg.readfp(f) for", "RPlacementRule: def __init__(self, rule): r = rule.split('/', 1) self.placement_id =", "suite.get_bucket_name(self._name + '.' 
+ str(bid)) bucket_name = bucket_name.replace(\"_\", \"-\") rb", "json.dumps(self.config) rgw_conf = self.config.rgw try: self.bucket_prefix = rgw_conf.bucket_prefix except: self.bucket_prefix", "conn = bunch.Bunch() for (k, u) in self.config.user.iteritems(): conn[k] =", "200: raise boto.exception.S3ResponseError(r.status, r.reason) return bunch.bunchify(json.loads(r.read())) def read_meta_key(self, key): return", "self.rtests.append(t) def write_test_data(self, test): k = Key(self.config_bucket) k.key = 'tests/'", "'STANDARD' sc = self.storage_classes[storage_class] except: eq('could not find storage class", "name class RPlacementTarget: def __init__(self, name, config): self.name = name", "self.zone_params = self.rgw_rest_admin.get_zone_params() self.placement_targets = {} for e in self.zone_params.placement_pools:", "self.zone_params def get_placement_target(self, placement_id): plid = placement_id if placement_id is", "k = Key(self.config_bucket) k.key = 'tests/' + test._name k.set_contents_from_string(test.to_json()) def", "self.load() self.check() def read_config(fp): config = bunch.Bunch() g = yaml.safe_load_all(fp)", "if hasattr(config, 'storage_classes'): self.storage_classes = config.storage_classes else: try: self.storage_classes =", "rgw_conf = self.config.rgw try: self.bucket_prefix = rgw_conf.bucket_prefix except: self.bucket_prefix =", "else: self.storage_class = 'STANDARD' class RBucket: def __init__(self, zone, bucket,", "self.storage_classes = bunch.bunchify({ 'STANDARD': { 'data_pool': config.data_pool }}) except: self.storage_classes", "self.do_check = False for step in suite_step.split(','): if step ==", "return None class RagweedEnv: def __init__(self): self.config = bunch.Bunch() cfg", "section.split(None, 1) if not self.config.has_key(section_type): self.config[section_type] = bunch.Bunch() self.config[section_type][name] =", "__init__(self): self.config = bunch.Bunch() cfg = ConfigParser.RawConfigParser() try: path =", "ConfigParser.NoOptionError: pass print json.dumps(self.config) rgw_conf = self.config.rgw try: self.bucket_prefix =", "the [rados] section in ' + os.environ['RAGWEED_CONF'] ) self.rados =", "name, bucket_prefix, zone, suite_step): self.name = name self.bucket_prefix = bucket_prefix", "self.conn = conn self.rgw_rest_admin = RGWRESTAdmin(self.conn.system) self.zone_params = self.rgw_rest_admin.get_zone_params() self.placement_targets", "self.rados.connect() pools = self.rados.list_pools() for pool in pools: print \"rados", "run tests, point environment ' + 'variable RAGWEED_CONF to a", "try: if not storage_class: storage_class = 'STANDARD' sc = self.storage_classes[storage_class]", "bucket_info return RBucket(self, bucket, bucket_info) def get_bucket(self, name): bucket =", "{'__pickle': pickle.dumps(obj)} def rtest_decode_json(d): if '__pickle' in d: return pickle.loads(str(d['__pickle']))", "RPlacementRule(placement_rule) return self.placement_target.get_data_pool(pr) class RStorageClasses: def __init__(self, config): if hasattr(config,", "= self.zone_params.default_placement try: return RPlacementTarget(plid, self.placement_targets[plid]) except: pass return None", "hasattr(config, 'storage_classes'): self.storage_classes = config.storage_classes else: try: self.storage_classes = bunch.bunchify({", "self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name) class RTest: def __init__(self): self._name = self.__class__.__name__ self.r_buckets =", "= suite.zone.create_bucket(bucket_name) self.r_buckets.append(rb) return rb def 
get_buckets(self): for rb in", "self.config cur[name] = bunch.Bunch() for var in str_config_opts: try: cur[name][var]", "print 'zone_params=', self.zone_params plid = self.zone_params.default_placement try: return RPlacementTarget(plid, self.placement_targets[plid])", "j[e]) def save(self): suite.write_test_data(self) def load(self): suite.read_test_data(self) for rb in", "boto.s3.connection import json import inspect import pickle import bunch import", "get_bucket(self, name, validate=True): return self.conn.get_bucket(name, validate=validate) class RGWRESTAdmin: def __init__(self,", "= len(self.r_buckets) + 1 bucket_name = suite.get_bucket_name(self._name + '.' +", "bucket_name = suite.get_bucket_name(self._name + '.' + str(bid)) bucket_name = bucket_name.replace(\"_\",", "conn self.rgw_rest_admin = RGWRESTAdmin(self.conn.system) self.zone_params = self.rgw_rest_admin.get_zone_params() self.placement_targets = {}", "bool_config_opts = [ 'is_secure', ] def dict_find(d, k): if d.has_key(k):", "calling_format = boto.s3.connection.OrdinaryCallingFormat(), ) def create_bucket(self, name): return self.conn.create_bucket(name) def", "explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool if explicit_pool is not None and explicit_pool", "if suite.is_preparing(): self.prepare() self.save() if suite.is_checking(): self.load() self.check() def read_config(fp):", "' + 'variable RAGWEED_CONF to a config file.', ) with", "ValueError: section_type = '' name = section self.config[name] = bunch.Bunch()", "= self.bucket_info.bucket.explicit_placement.data_pool if explicit_pool is not None and explicit_pool !=", "attrs[x] = getattr(self, x) return json.dumps(attrs, cls=RTestJSONSerialize) def from_json(self, s):", "e, j[e]) def save(self): suite.write_test_data(self) def load(self): suite.read_test_data(self) for rb", "except: self.storage_classes = None pass def get(self, storage_class): assert(self.storage_classes !=", "{'key': key}) def get_bucket_entrypoint(self, bucket_name): return self.read_meta_key('bucket:' + bucket_name) def", "pass def get(self, storage_class): assert(self.storage_classes != None) try: if not", "for rb in self.r_buckets: suite.zone.refresh_rbucket(rb) def test(self): suite.register_test(self) if suite.is_preparing():", "self.placement_target.get_data_pool(pr) class RStorageClasses: def __init__(self, config): if hasattr(config, 'storage_classes'): self.storage_classes", "self.bucket_prefix + '-' + suffix def register_test(self, t): self.rtests.append(t) def", "from nose.plugins.attrib import attr from nose.tools import eq_ as eq", "+ str(bid)) bucket_name = bucket_name.replace(\"_\", \"-\") rb = suite.zone.create_bucket(bucket_name) self.r_buckets.append(rb)", "suite.write_test_data(self) def load(self): suite.read_test_data(self) for rb in self.r_buckets: suite.zone.refresh_rbucket(rb) def", "self.zone.create_raw_bucket(self.get_bucket_name('conf')) if step == 'check' or step == 'test': self.do_check", "def get_all(self): for (name, _) in self.storage_classes.iteritems(): yield name class", "cfg = ConfigParser.RawConfigParser() try: path = os.environ['RAGWEED_CONF'] except KeyError: raise", "return None def get_default_placement(self): return get_placement_target(self.zone_params.default_placement) def create_bucket(self, name): bucket", "len(self.r_buckets) + 1 bucket_name = suite.get_bucket_name(self._name + '.' 
+ str(bid))", "ConfigParser.NoOptionError: pass for var in int_config_opts: try: cur[name][var] = cfg.getint(section,", "def create_bucket(self, name): return self.conn.create_bucket(name) def get_bucket(self, name, validate=True): return", "'' name = section self.config[name] = bunch.Bunch() cur = self.config", "class RSuite: def __init__(self, name, bucket_prefix, zone, suite_step): self.name =", "def get_zone_params(self): return self.get_resource('/admin/config', {'type': 'zone'}) class RSuite: def __init__(self,", "cls=RTestJSONSerialize) def from_json(self, s): j = json.loads(s, object_hook=rtest_decode_json) for e", "assert(self.storage_classes != None) try: if not storage_class: storage_class = 'STANDARD'", "k.key = 'tests/' + test._name s = k.get_contents_as_string() print 'read_test_data=',", "nose.plugins.attrib import attr from nose.tools import eq_ as eq from", "self.init() def create_bucket(self): bid = len(self.r_buckets) + 1 bucket_name =", "pr = RPlacementRule(placement_rule) return self.placement_target.get_data_pool(pr) class RStorageClasses: def __init__(self, config):", "host=host, port=port, is_secure=is_secure, calling_format = boto.s3.connection.OrdinaryCallingFormat(), ) def create_bucket(self, name):", "path = '/' + key.bucket.name + '/' + key.name params", "'STANDARD' class RBucket: def __init__(self, zone, bucket, bucket_info): self.zone =", "self.storage_classes = RStorageClasses(config) if not self.data_extra_pool: self.data_extra_pool = self.storage_classes.get_data_pool('STANDARD') def", "print bunch.bunchify(new) config.update(bunch.bunchify(new)) return config str_config_opts = [ 'user_id', 'access_key',", "aws_access_key_id = access_key, aws_secret_access_key = secret_key, host=host, port=port, is_secure=is_secure, calling_format", "config.storage_classes else: try: self.storage_classes = bunch.bunchify({ 'STANDARD': { 'data_pool': config.data_pool", "[] self.init() def create_bucket(self): bid = len(self.r_buckets) + 1 bucket_name", "[] self.do_preparing = False self.do_check = False for step in", "bunch.bunchify({ 'STANDARD': { 'data_pool': config.data_pool }}) except: self.storage_classes = None", "config): self.name = name self.index_pool = config.index_pool self.data_extra_pool = config.data_extra_pool", "get_tail_pool(self, obj_layout): try: placement_rule = obj_layout.manifest.tail_placement.placement_rule except: placement_rule = ''", "placement_rule = '' if placement_rule == '': try: # new", "init(self): pass def prepare(self): pass def check(self): pass def to_json(self):", "print 'read_test_data=', s test.from_json(s) def is_preparing(self): return self.do_preparing def is_checking(self):", "suite.zone.refresh_rbucket(rb) def test(self): suite.register_test(self) if suite.is_preparing(): self.prepare() self.save() if suite.is_checking():", "section self.config[name] = bunch.Bunch() cur = self.config cur[name] = bunch.Bunch()", "path, params) if r.status != 200: raise boto.exception.S3ResponseError(r.status, r.reason) return", "class RTest: def __init__(self): self._name = self.__class__.__name__ self.r_buckets = []", "from boto.s3.key import Key from nose.plugins.attrib import attr from nose.tools", "get_data_pool(self): try: # old style explicit pool explicit_pool = self.bucket_info.bucket.pool", "bucket_id: ep = self.get_bucket_entrypoint(bucket_name) print ep bucket_id = ep.data.bucket.bucket_id result", "def get_raw_bucket(self, name): return self.conn.regular.get_bucket(name) def refresh_rbucket(self, rbucket): rbucket.bucket =", 
"prepare(self): pass def check(self): pass def to_json(self): attrs = {}", "isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))): return", "= cfg.getint(section, var) except ConfigParser.NoOptionError: pass for var in bool_config_opts:", "Key(self.config_bucket) k.key = 'tests/' + test._name k.set_contents_from_string(test.to_json()) def read_test_data(self, test):", "yaml.safe_load_all(fp) for new in g: print bunch.bunchify(new) config.update(bunch.bunchify(new)) return config", "def get(self, storage_class): assert(self.storage_classes != None) try: if not storage_class:", "False for step in suite_step.split(','): if step == 'prepare': self.do_preparing", "print json.dumps(self.config) rgw_conf = self.config.rgw try: self.bucket_prefix = rgw_conf.bucket_prefix except:", "return json.dumps(attrs, cls=RTestJSONSerialize) def from_json(self, s): j = json.loads(s, object_hook=rtest_decode_json)", "config = bunch.Bunch() g = yaml.safe_load_all(fp) for new in g:", "{ 'data_pool': config.data_pool }}) except: self.storage_classes = None pass def", "result.data.bucket_info def check_bucket_index(self, bucket_name): return self.get_resource('/admin/bucket',{'index' : None, 'bucket':bucket_name}) def", "def get_placement_target(self, placement_id): plid = placement_id if placement_id is None", "create_raw_bucket(self, name): return self.conn.regular.create_bucket(name) def get_raw_bucket(self, name): return self.conn.regular.get_bucket(name) def", "raise RuntimeError( 'To run tests, point environment ' + 'variable", "return bunch.bunchify(json.loads(r.read())) def read_meta_key(self, key): return self.get_resource('/admin/metadata', {'key': key}) def", "return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool except: pass try: # old style return obj_layout.manifest.tail_bucket.pool", "bucket_name) def get_bucket_instance_info(self, bucket_name, bucket_id = None): if not bucket_id:", "def __init__(self): self.config = bunch.Bunch() cfg = ConfigParser.RawConfigParser() try: path", "raise RuntimeError( 'ceph_conf is missing under the [rados] section in", "global ragweed_env global suite ragweed_env = RagweedEnv() suite = ragweed_env.suite", "None: params['versionId'] = key.version_id print params return self.get_resource(path, params) def", "get_obj_layout(self, key): path = '/' + key.bucket.name + '/' +", "'/' + key.name params = {'layout': None} if key.version_id is", "self.zone.get_raw_bucket(self.get_bucket_name('conf')) def get_bucket_name(self, suffix): return self.bucket_prefix + '-' + suffix", "if (len(r) == 2): self.storage_class=r[1] else: self.storage_class = 'STANDARD' class", "get_data_pool(self, placement_rule): return self.storage_classes.get(placement_rule.storage_class).data_pool class RZone: def __init__(self, conn): self.conn", "= False for step in suite_step.split(','): if step == 'prepare':", "'is_secure', ] def dict_find(d, k): if d.has_key(k): return d[k] return", "bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:', bucket_info return RBucket(self, bucket, bucket_info)", "setup_module(): global ragweed_env global suite ragweed_env = RagweedEnv() suite =", "self.placement_target.get_data_pool(self.placement_rule) def get_tail_pool(self, obj_layout): try: placement_rule = obj_layout.manifest.tail_placement.placement_rule except: placement_rule", "def get_resource(self, path, params): r = _make_admin_request(self.conn, \"GET\", path, params)", "return self.read_meta_key('bucket:' + bucket_name) def 
get_bucket_instance_info(self, bucket_name, bucket_id = None):", "] int_config_opts = [ 'port', ] bool_config_opts = [ 'is_secure',", "= 'STANDARD' class RBucket: def __init__(self, zone, bucket, bucket_info): self.zone", "class RBucket: def __init__(self, zone, bucket, bucket_info): self.zone = zone", "def get_obj_layout(self, key): path = '/' + key.bucket.name + '/'", "return RBucket(self, bucket, bucket_info) def get_bucket(self, name): bucket = self.get_raw_bucket(name)", "import sys import os import boto import boto.s3.connection import json", "try: return RPlacementTarget(plid, self.placement_targets[plid]) except: pass return None def get_default_placement(self):", "bucket = self.create_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:', bucket_info return", "= [ 'port', ] bool_config_opts = [ 'is_secure', ] def", "RuntimeError( 'To run tests, point environment ' + 'variable RAGWEED_CONF", "+ 'variable RAGWEED_CONF to a config file.', ) with file(path)", "name self.bucket_prefix = bucket_prefix self.zone = zone self.config_bucket = None", "test._name k.set_contents_from_string(test.to_json()) def read_test_data(self, test): k = Key(self.config_bucket) k.key =", "= 'ragweed' conn = bunch.Bunch() for (k, u) in self.config.user.iteritems():", "+ '/' + key.name params = {'layout': None} if key.version_id", "'' if placement_rule == '': try: # new style return", "for new in g: print bunch.bunchify(new) config.update(bunch.bunchify(new)) return config str_config_opts", "self.do_check = True self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf')) def get_bucket_name(self, suffix): return", "explicit_pool return self.placement_target.get_data_pool(self.placement_rule) def get_tail_pool(self, obj_layout): try: placement_rule = obj_layout.manifest.tail_placement.placement_rule", "RTest: def __init__(self): self._name = self.__class__.__name__ self.r_buckets = [] self.init()", "ConfigParser import rados from boto.s3.key import Key from nose.plugins.attrib import", "config.update(bunch.bunchify(new)) return config str_config_opts = [ 'user_id', 'access_key', 'secret_key', 'host',", "return get_placement_target(self.zone_params.default_placement) def create_bucket(self, name): bucket = self.create_raw_bucket(name) bucket_info =", "\"-\") rb = suite.zone.create_bucket(bucket_name) self.r_buckets.append(rb) return rb def get_buckets(self): for", "self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:', bucket_info return RBucket(self, bucket, bucket_info) def get_bucket(self,", "self.storage_classes.iteritems(): yield name class RPlacementTarget: def __init__(self, name, config): self.name", "= 'tests/' + test._name s = k.get_contents_as_string() print 'read_test_data=', s", "key.version_id print params return self.get_resource(path, params) def get_zone_params(self): return self.get_resource('/admin/config',", "'STANDARD': { 'data_pool': config.data_pool }}) except: self.storage_classes = None pass", "= self.storage_classes[storage_class] except: eq('could not find storage class ' +", "def get_data_pool(self, placement_rule): return self.storage_classes.get(placement_rule.storage_class).data_pool class RZone: def __init__(self, conn):", "self._name = self.__class__.__name__ self.r_buckets = [] self.init() def create_bucket(self): bid", "'ceph_conf is missing under the [rados] section in ' +", "config file.', ) with file(path) as f: cfg.readfp(f) for section", "to a config file.', ) with file(path) 
as f: cfg.readfp(f)", "in g: print bunch.bunchify(new) config.update(bunch.bunchify(new)) return config str_config_opts = [", "= obj_layout.manifest.tail_placement.placement_rule except: placement_rule = '' if placement_rule == '':", "plid = placement_id if placement_id is None or placement_id ==", "True self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf')) def get_bucket_name(self, suffix): return self.bucket_prefix +", "1) if not self.config.has_key(section_type): self.config[section_type] = bunch.Bunch() self.config[section_type][name] = bunch.Bunch()", "self.bucket_prefix = 'ragweed' conn = bunch.Bunch() for (k, u) in", "pool explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool if explicit_pool is not None and", "eq_ as eq from .reqs import _make_admin_request ragweed_env = None", "def to_json(self): attrs = {} for x in dir(self): if", "= config.index_pool self.data_extra_pool = config.data_extra_pool self.storage_classes = RStorageClasses(config) if not", "rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure')) self.zone = RZone(conn) self.suite =", "def __init__(self, name, config): self.name = name self.index_pool = config.index_pool", "= self.create_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:', bucket_info return RBucket(self,", "except ConfigParser.NoOptionError: pass for var in bool_config_opts: try: cur[name][var] =", "bucket = self.get_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:', bucket_info return", "zone self.bucket = bucket self.name = bucket.name self.bucket_info = bucket_info", "for var in int_config_opts: try: cur[name][var] = cfg.getint(section, var) except", "'__pickle' in d: return pickle.loads(str(d['__pickle'])) return d class RPlacementRule: def", "os.environ['RAGWEED_CONF'] except KeyError: raise RuntimeError( 'To run tests, point environment", "RGWConnection(u.access_key, u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure')) self.zone = RZone(conn)", "eq from .reqs import _make_admin_request ragweed_env = None suite =", "None and explicit_pool != '': return explicit_pool return self.placement_target.get_data_pool(self.placement_rule) def", "in self.zone_params.placement_pools: self.placement_targets[e.key] = e.val print 'zone_params:', self.zone_params def get_placement_target(self,", "name = section self.config[name] = bunch.Bunch() cur = self.config cur[name]", "suite.zone.create_bucket(bucket_name) self.r_buckets.append(rb) return rb def get_buckets(self): for rb in self.r_buckets:", "self.conn.regular.create_bucket(name) def get_raw_bucket(self, name): return self.conn.regular.get_bucket(name) def refresh_rbucket(self, rbucket): rbucket.bucket", "_) in self.storage_classes.iteritems(): yield name class RPlacementTarget: def __init__(self, name,", "style explicit pool explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool if explicit_pool is not", "(len(r) == 2): self.storage_class=r[1] else: self.storage_class = 'STANDARD' class RBucket:", "get_resource(self, path, params): r = _make_admin_request(self.conn, \"GET\", path, params) if", "in suite_step.split(','): if step == 'prepare': self.do_preparing = True self.config_bucket", "JSONEncoder.default(self, obj) return {'__pickle': pickle.dumps(obj)} def rtest_decode_json(d): if '__pickle' in", "'storage_classes'): self.storage_classes = config.storage_classes else: try: 
self.storage_classes = bunch.bunchify({ 'STANDARD':", "__init__(self, name, config): self.name = name self.index_pool = config.index_pool self.data_extra_pool", "= self.storage_classes.get_data_pool('STANDARD') def get_data_pool(self, placement_rule): return self.storage_classes.get(placement_rule.storage_class).data_pool class RZone: def", "self.get_raw_bucket(rbucket.bucket.name) rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name) class RTest: def __init__(self): self._name =", "self.config[section_type][name] = bunch.Bunch() cur = self.config[section_type] except ValueError: section_type =", "except: # new style explicit pool explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool if", "json.dumps(attrs, cls=RTestJSONSerialize) def from_json(self, s): j = json.loads(s, object_hook=rtest_decode_json) for", "bunch.Bunch() for (k, u) in self.config.user.iteritems(): conn[k] = RGWConnection(u.access_key, u.secret_key,", "= [] self.do_preparing = False self.do_check = False for step", "+ suffix def register_test(self, t): self.rtests.append(t) def write_test_data(self, test): k", "k.set_contents_from_string(test.to_json()) def read_test_data(self, test): k = Key(self.config_bucket) k.key = 'tests/'", "def __init__(self, name, bucket_prefix, zone, suite_step): self.name = name self.bucket_prefix", "params = {'layout': None} if key.version_id is not None: params['versionId']", "obj_layout): try: placement_rule = obj_layout.manifest.tail_placement.placement_rule except: placement_rule = '' if", "# old style return obj_layout.manifest.tail_bucket.pool except: pass pr = RPlacementRule(placement_rule)", "= self.__class__.__name__ self.r_buckets = [] self.init() def create_bucket(self): bid =", "boto import boto.s3.connection import json import inspect import pickle import", "bucket_id = None): if not bucket_id: ep = self.get_bucket_entrypoint(bucket_name) print", "return d class RPlacementRule: def __init__(self, rule): r = rule.split('/',", "create_bucket(self, name): bucket = self.create_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:',", "= cfg.getboolean(section, var) except ConfigParser.NoOptionError: pass print json.dumps(self.config) rgw_conf =", "pool def setup_module(): global ragweed_env global suite ragweed_env = RagweedEnv()", "= key.version_id print params return self.get_resource(path, params) def get_zone_params(self): return", "'tests/' + test._name s = k.get_contents_as_string() print 'read_test_data=', s test.from_json(s)", "'bucket_info:', bucket_info return RBucket(self, bucket, bucket_info) def get_bucket(self, name): bucket", "def get_bucket_entrypoint(self, bucket_name): return self.read_meta_key('bucket:' + bucket_name) def get_bucket_instance_info(self, bucket_name,", "return self.placement_target.get_data_pool(self.placement_rule) def get_tail_pool(self, obj_layout): try: placement_rule = obj_layout.manifest.tail_placement.placement_rule except:", "old style return obj_layout.manifest.tail_bucket.pool except: pass pr = RPlacementRule(placement_rule) return", "import boto.s3.connection import json import inspect import pickle import bunch", "def get_default_placement(self): return get_placement_target(self.zone_params.default_placement) def create_bucket(self, name): bucket = self.create_raw_bucket(name)", "rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name) class RTest: def __init__(self): self._name = self.__class__.__name__", 
"return self.conn.create_bucket(name) def get_bucket(self, name, validate=True): return self.conn.get_bucket(name, validate=validate) class", "json import inspect import pickle import bunch import yaml import", "import Key from nose.plugins.attrib import attr from nose.tools import eq_", "None class RGWConnection: def __init__(self, access_key, secret_key, host, port, is_secure):", "pool explicit_pool = self.bucket_info.bucket.pool except: # new style explicit pool", "= bunch.bunchify({ 'STANDARD': { 'data_pool': config.data_pool }}) except: self.storage_classes =", "print ep bucket_id = ep.data.bucket.bucket_id result = self.read_meta_key('bucket.instance:' + bucket_name", "import bunch import yaml import ConfigParser import rados from boto.s3.key", "None pass def get(self, storage_class): assert(self.storage_classes != None) try: if", "style explicit pool explicit_pool = self.bucket_info.bucket.pool except: # new style", "sys import os import boto import boto.s3.connection import json import", "x.startswith('r_'): attrs[x] = getattr(self, x) return json.dumps(attrs, cls=RTestJSONSerialize) def from_json(self,", "= _make_admin_request(self.conn, \"GET\", path, params) if r.status != 200: raise", "try: (section_type, name) = section.split(None, 1) if not self.config.has_key(section_type): self.config[section_type]", "key}) def get_bucket_entrypoint(self, bucket_name): return self.read_meta_key('bucket:' + bucket_name) def get_bucket_instance_info(self,", "suite.is_checking(): self.load() self.check() def read_config(fp): config = bunch.Bunch() g =", "rule.split('/', 1) self.placement_id = r[0] if (len(r) == 2): self.storage_class=r[1]", "not bucket_id: ep = self.get_bucket_entrypoint(bucket_name) print ep bucket_id = ep.data.bucket.bucket_id", "self.do_preparing def is_checking(self): return self.do_check class RTestJSONSerialize(json.JSONEncoder): def default(self, obj):", "os.environ['RAGWEED_STAGES']) try: self.ceph_conf = self.config.rados.ceph_conf except: raise RuntimeError( 'ceph_conf is", "self.get_resource('/admin/config', {'type': 'zone'}) class RSuite: def __init__(self, name, bucket_prefix, zone,", "def __init__(self, connection): self.conn = connection def get_resource(self, path, params):", "= self.get_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:', bucket_info return RBucket(self,", "def prepare(self): pass def check(self): pass def to_json(self): attrs =", "= bunch.Bunch() self.config[section_type][name] = bunch.Bunch() cur = self.config[section_type] except ValueError:", "= self.zone.get_placement_target(self.bucket_info.placement_rule) except: pass def get_data_pool(self): try: # old style", "not find storage class ' + storage_class, 0) return sc", "pools: print \"rados pool>\", pool def setup_module(): global ragweed_env global", "suite = None class RGWConnection: def __init__(self, access_key, secret_key, host,", "self.rgw_rest_admin = RGWRESTAdmin(self.conn.system) self.zone_params = self.rgw_rest_admin.get_zone_params() self.placement_targets = {} for", "self.zone_params.default_placement try: return RPlacementTarget(plid, self.placement_targets[plid]) except: pass return None def", "bunch.Bunch() cur = self.config cur[name] = bunch.Bunch() for var in", "self.storage_classes.get_data_pool('STANDARD') def get_data_pool(self, placement_rule): return self.storage_classes.get(placement_rule.storage_class).data_pool class RZone: def __init__(self,", "self.zone = RZone(conn) self.suite = RSuite('ragweed', 
self.bucket_prefix, self.zone, os.environ['RAGWEED_STAGES']) try:", "read_config(fp): config = bunch.Bunch() g = yaml.safe_load_all(fp) for new in", "self.config[section_type] except ValueError: section_type = '' name = section self.config[name]", "bucket self.name = bucket.name self.bucket_info = bucket_info try: self.placement_rule =", "self.ceph_conf = self.config.rados.ceph_conf except: raise RuntimeError( 'ceph_conf is missing under", "return self.get_resource('/admin/config', {'type': 'zone'}) class RSuite: def __init__(self, name, bucket_prefix,", "missing under the [rados] section in ' + os.environ['RAGWEED_CONF'] )", "step == 'check' or step == 'test': self.do_check = True", "rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name) rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name) class RTest: def __init__(self):", "class RagweedEnv: def __init__(self): self.config = bunch.Bunch() cfg = ConfigParser.RawConfigParser()", "rados from boto.s3.key import Key from nose.plugins.attrib import attr from", "connection): self.conn = connection def get_resource(self, path, params): r =", "bunch.Bunch() g = yaml.safe_load_all(fp) for new in g: print bunch.bunchify(new)", "int, float, bool, type(None))): return JSONEncoder.default(self, obj) return {'__pickle': pickle.dumps(obj)}", "boto.connect_s3( aws_access_key_id = access_key, aws_secret_access_key = secret_key, host=host, port=port, is_secure=is_secure,", "self.storage_classes = config.storage_classes else: try: self.storage_classes = bunch.bunchify({ 'STANDARD': {", "yield name class RPlacementTarget: def __init__(self, name, config): self.name =", "create_bucket(self, name): return self.conn.create_bucket(name) def get_bucket(self, name, validate=True): return self.conn.get_bucket(name,", "pool>\", pool def setup_module(): global ragweed_env global suite ragweed_env =", "is_secure self.conn = boto.connect_s3( aws_access_key_id = access_key, aws_secret_access_key = secret_key,", "RTestJSONSerialize(json.JSONEncoder): def default(self, obj): if isinstance(obj, (list, dict, str, unicode,", "__init__(self, connection): self.conn = connection def get_resource(self, path, params): r", "obj) return {'__pickle': pickle.dumps(obj)} def rtest_decode_json(d): if '__pickle' in d:", "return RBucket(self, bucket, bucket_info) def create_raw_bucket(self, name): return self.conn.regular.create_bucket(name) def", "tests, point environment ' + 'variable RAGWEED_CONF to a config", "self.config.user.iteritems(): conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure'))", "str_config_opts: try: cur[name][var] = cfg.get(section, var) except ConfigParser.NoOptionError: pass for", "def __init__(self, access_key, secret_key, host, port, is_secure): self.host = host", "def get_buckets(self): for rb in self.r_buckets: yield rb def init(self):", "file.', ) with file(path) as f: cfg.readfp(f) for section in", "\"rados pool>\", pool def setup_module(): global ragweed_env global suite ragweed_env", "self.bucket_info.bucket.pool except: # new style explicit pool explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool", "Key from nose.plugins.attrib import attr from nose.tools import eq_ as", "= host self.port = port self.is_secure = is_secure self.conn =", "key): path = '/' + key.bucket.name + '/' + key.name", "= rados.Rados(conffile=self.ceph_conf) self.rados.connect() pools = self.rados.list_pools() for pool in pools:", "name) = 
section.split(None, 1) if not self.config.has_key(section_type): self.config[section_type] = bunch.Bunch()", "return sc def get_all(self): for (name, _) in self.storage_classes.iteritems(): yield", "section in ' + os.environ['RAGWEED_CONF'] ) self.rados = rados.Rados(conffile=self.ceph_conf) self.rados.connect()", "= None self.rtests = [] self.do_preparing = False self.do_check =", "False self.do_check = False for step in suite_step.split(','): if step", "placement_id is None or placement_id == '': print 'zone_params=', self.zone_params", "= name self.index_pool = config.index_pool self.data_extra_pool = config.data_extra_pool self.storage_classes =", "check_bucket_index(self, bucket_name): return self.get_resource('/admin/bucket',{'index' : None, 'bucket':bucket_name}) def get_obj_layout(self, key):", "rule): r = rule.split('/', 1) self.placement_id = r[0] if (len(r)", "= True self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf')) if step == 'check' or", "return self.do_check class RTestJSONSerialize(json.JSONEncoder): def default(self, obj): if isinstance(obj, (list,", "return self.get_resource('/admin/bucket',{'index' : None, 'bucket':bucket_name}) def get_obj_layout(self, key): path =", "RBucket: def __init__(self, zone, bucket, bucket_info): self.zone = zone self.bucket", "+ '.' + str(bid)) bucket_name = bucket_name.replace(\"_\", \"-\") rb =", "r = rule.split('/', 1) self.placement_id = r[0] if (len(r) ==", "not self.config.has_key(section_type): self.config[section_type] = bunch.Bunch() self.config[section_type][name] = bunch.Bunch() cur =", "return self.conn.regular.create_bucket(name) def get_raw_bucket(self, name): return self.conn.regular.get_bucket(name) def refresh_rbucket(self, rbucket):", "dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure')) self.zone = RZone(conn) self.suite = RSuite('ragweed',", "sc = self.storage_classes[storage_class] except: eq('could not find storage class '", "params return self.get_resource(path, params) def get_zone_params(self): return self.get_resource('/admin/config', {'type': 'zone'})", "for x in dir(self): if x.startswith('r_'): attrs[x] = getattr(self, x)", "step == 'prepare': self.do_preparing = True self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf')) if", "'tests/' + test._name k.set_contents_from_string(test.to_json()) def read_test_data(self, test): k = Key(self.config_bucket)", "= is_secure self.conn = boto.connect_s3( aws_access_key_id = access_key, aws_secret_access_key =", "boto.exception.S3ResponseError(r.status, r.reason) return bunch.bunchify(json.loads(r.read())) def read_meta_key(self, key): return self.get_resource('/admin/metadata', {'key':", "= self.get_bucket_entrypoint(bucket_name) print ep bucket_id = ep.data.bucket.bucket_id result = self.read_meta_key('bucket.instance:'", "= self.zone.get_raw_bucket(self.get_bucket_name('conf')) def get_bucket_name(self, suffix): return self.bucket_prefix + '-' +", "name, config): self.name = name self.index_pool = config.index_pool self.data_extra_pool =", "RStorageClasses(config) if not self.data_extra_pool: self.data_extra_pool = self.storage_classes.get_data_pool('STANDARD') def get_data_pool(self, placement_rule):", "read_test_data(self, test): k = Key(self.config_bucket) k.key = 'tests/' + test._name", "return self.do_preparing def is_checking(self): return self.do_check class RTestJSONSerialize(json.JSONEncoder): def default(self,", "ep.data.bucket.bucket_id result = self.read_meta_key('bucket.instance:' + bucket_name + 
\":\" + bucket_id)", ": None, 'bucket':bucket_name}) def get_obj_layout(self, key): path = '/' +", "step in suite_step.split(','): if step == 'prepare': self.do_preparing = True", "__init__(self, config): if hasattr(config, 'storage_classes'): self.storage_classes = config.storage_classes else: try:", "in pools: print \"rados pool>\", pool def setup_module(): global ragweed_env", "= self.zone.create_raw_bucket(self.get_bucket_name('conf')) if step == 'check' or step == 'test':", "name, validate=True): return self.conn.get_bucket(name, validate=validate) class RGWRESTAdmin: def __init__(self, connection):", "] bool_config_opts = [ 'is_secure', ] def dict_find(d, k): if", "= section self.config[name] = bunch.Bunch() cur = self.config cur[name] =", "yaml import ConfigParser import rados from boto.s3.key import Key from", "os import boto import boto.s3.connection import json import inspect import", "'check' or step == 'test': self.do_check = True self.config_bucket =", "self.bucket_prefix = rgw_conf.bucket_prefix except: self.bucket_prefix = 'ragweed' conn = bunch.Bunch()", "'data_pool': config.data_pool }}) except: self.storage_classes = None pass def get(self,", "name self.index_pool = config.index_pool self.data_extra_pool = config.data_extra_pool self.storage_classes = RStorageClasses(config)", "# new style return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool except: pass try: # old", "storage class ' + storage_class, 0) return sc def get_all(self):", "eq('could not find storage class ' + storage_class, 0) return", "= boto.connect_s3( aws_access_key_id = access_key, aws_secret_access_key = secret_key, host=host, port=port,", "'-' + suffix def register_test(self, t): self.rtests.append(t) def write_test_data(self, test):", "bid = len(self.r_buckets) + 1 bucket_name = suite.get_bucket_name(self._name + '.'", "KeyError: raise RuntimeError( 'To run tests, point environment ' +", "self.get_resource('/admin/metadata', {'key': key}) def get_bucket_entrypoint(self, bucket_name): return self.read_meta_key('bucket:' + bucket_name)", "self.r_buckets: suite.zone.refresh_rbucket(rb) def test(self): suite.register_test(self) if suite.is_preparing(): self.prepare() self.save() if", "bucket.name self.bucket_info = bucket_info try: self.placement_rule = RPlacementRule(self.bucket_info.placement_rule) self.placement_target =", "import yaml import ConfigParser import rados from boto.s3.key import Key", "if not bucket_id: ep = self.get_bucket_entrypoint(bucket_name) print ep bucket_id =", "is not None: params['versionId'] = key.version_id print params return self.get_resource(path,", "except: placement_rule = '' if placement_rule == '': try: #", "def get_bucket(self, name): bucket = self.get_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print", "== '': print 'zone_params=', self.zone_params plid = self.zone_params.default_placement try: return", "config.data_pool }}) except: self.storage_classes = None pass def get(self, storage_class):", "'prepare': self.do_preparing = True self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf')) if step ==", "try: cur[name][var] = cfg.get(section, var) except ConfigParser.NoOptionError: pass for var", "= rule.split('/', 1) self.placement_id = r[0] if (len(r) == 2):", "self.name = name self.index_pool = config.index_pool self.data_extra_pool = config.data_extra_pool self.storage_classes", "r.reason) return bunch.bunchify(json.loads(r.read())) def read_meta_key(self, key): return 
self.get_resource('/admin/metadata', {'key': key})", "}}) except: self.storage_classes = None pass def get(self, storage_class): assert(self.storage_classes", "self.placement_targets[e.key] = e.val print 'zone_params:', self.zone_params def get_placement_target(self, placement_id): plid", "+ bucket_id) return result.data.bucket_info def check_bucket_index(self, bucket_name): return self.get_resource('/admin/bucket',{'index' :", "self.r_buckets.append(rb) return rb def get_buckets(self): for rb in self.r_buckets: yield", "pass try: # old style return obj_layout.manifest.tail_bucket.pool except: pass pr", "for rb in self.r_buckets: yield rb def init(self): pass def", "import inspect import pickle import bunch import yaml import ConfigParser", "'port', ] bool_config_opts = [ 'is_secure', ] def dict_find(d, k):", "self.zone, os.environ['RAGWEED_STAGES']) try: self.ceph_conf = self.config.rados.ceph_conf except: raise RuntimeError( 'ceph_conf", "= bucket.name self.bucket_info = bucket_info try: self.placement_rule = RPlacementRule(self.bucket_info.placement_rule) self.placement_target", "def get_data_pool(self): try: # old style explicit pool explicit_pool =", "for var in str_config_opts: try: cur[name][var] = cfg.get(section, var) except", "u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure')) self.zone = RZone(conn) self.suite", "except KeyError: raise RuntimeError( 'To run tests, point environment '", "import os import boto import boto.s3.connection import json import inspect", "cur[name][var] = cfg.getint(section, var) except ConfigParser.NoOptionError: pass for var in", ") self.rados = rados.Rados(conffile=self.ceph_conf) self.rados.connect() pools = self.rados.list_pools() for pool", "'bucket_prefix', ] int_config_opts = [ 'port', ] bool_config_opts = [", "key.name params = {'layout': None} if key.version_id is not None:", "is None or placement_id == '': print 'zone_params=', self.zone_params plid", "port=port, is_secure=is_secure, calling_format = boto.s3.connection.OrdinaryCallingFormat(), ) def create_bucket(self, name): return", "self.do_preparing = True self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf')) if step == 'check'", "is not None and explicit_pool != '': return explicit_pool return", "Key(self.config_bucket) k.key = 'tests/' + test._name s = k.get_contents_as_string() print", "get_raw_bucket(self, name): return self.conn.regular.get_bucket(name) def refresh_rbucket(self, rbucket): rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name)", "cur = self.config cur[name] = bunch.Bunch() for var in str_config_opts:", "g: print bunch.bunchify(new) config.update(bunch.bunchify(new)) return config str_config_opts = [ 'user_id',", "class RGWConnection: def __init__(self, access_key, secret_key, host, port, is_secure): self.host", "check(self): pass def to_json(self): attrs = {} for x in", "def default(self, obj): if isinstance(obj, (list, dict, str, unicode, int,", "j = json.loads(s, object_hook=rtest_decode_json) for e in j: setattr(self, e,", "suite_step): self.name = name self.bucket_prefix = bucket_prefix self.zone = zone", "as f: cfg.readfp(f) for section in cfg.sections(): try: (section_type, name)", "pools = self.rados.list_pools() for pool in pools: print \"rados pool>\",", "def is_checking(self): return self.do_check class RTestJSONSerialize(json.JSONEncoder): def default(self, obj): if", "== 'prepare': self.do_preparing = True self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf')) if 
step", "bucket_info): self.zone = zone self.bucket = bucket self.name = bucket.name", "'.' + str(bid)) bucket_name = bucket_name.replace(\"_\", \"-\") rb = suite.zone.create_bucket(bucket_name)", "print 'zone_params:', self.zone_params def get_placement_target(self, placement_id): plid = placement_id if", "try: # old style explicit pool explicit_pool = self.bucket_info.bucket.pool except:", "for e in self.zone_params.placement_pools: self.placement_targets[e.key] = e.val print 'zone_params:', self.zone_params", "_make_admin_request ragweed_env = None suite = None class RGWConnection: def", "except: pass return None def get_default_placement(self): return get_placement_target(self.zone_params.default_placement) def create_bucket(self,", "var in int_config_opts: try: cur[name][var] = cfg.getint(section, var) except ConfigParser.NoOptionError:", "self.config = bunch.Bunch() cfg = ConfigParser.RawConfigParser() try: path = os.environ['RAGWEED_CONF']", "conn): self.conn = conn self.rgw_rest_admin = RGWRESTAdmin(self.conn.system) self.zone_params = self.rgw_rest_admin.get_zone_params()", "return d[k] return None class RagweedEnv: def __init__(self): self.config =", "host, port, is_secure): self.host = host self.port = port self.is_secure", "self.config_bucket = None self.rtests = [] self.do_preparing = False self.do_check", "load(self): suite.read_test_data(self) for rb in self.r_buckets: suite.zone.refresh_rbucket(rb) def test(self): suite.register_test(self)", "placement_rule == '': try: # new style return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool except:", "self.conn.regular.get_bucket(name) def refresh_rbucket(self, rbucket): rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name) rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name)", "name): return self.conn.regular.create_bucket(name) def get_raw_bucket(self, name): return self.conn.regular.get_bucket(name) def refresh_rbucket(self,", "s): j = json.loads(s, object_hook=rtest_decode_json) for e in j: setattr(self,", "def read_meta_key(self, key): return self.get_resource('/admin/metadata', {'key': key}) def get_bucket_entrypoint(self, bucket_name):", "bucket_name.replace(\"_\", \"-\") rb = suite.zone.create_bucket(bucket_name) self.r_buckets.append(rb) return rb def get_buckets(self):", "(name, _) in self.storage_classes.iteritems(): yield name class RPlacementTarget: def __init__(self,", "var) except ConfigParser.NoOptionError: pass for var in int_config_opts: try: cur[name][var]", "get(self, storage_class): assert(self.storage_classes != None) try: if not storage_class: storage_class", "attr from nose.tools import eq_ as eq from .reqs import", "+ os.environ['RAGWEED_CONF'] ) self.rados = rados.Rados(conffile=self.ceph_conf) self.rados.connect() pools = self.rados.list_pools()", "is_checking(self): return self.do_check class RTestJSONSerialize(json.JSONEncoder): def default(self, obj): if isinstance(obj,", "= self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name) class RTest: def __init__(self): self._name = self.__class__.__name__ self.r_buckets", "u) in self.config.user.iteritems(): conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'),", "{'layout': None} if key.version_id is not None: params['versionId'] = key.version_id", "= bucket self.name = bucket.name self.bucket_info = bucket_info try: self.placement_rule", "self.get_resource('/admin/bucket',{'index' : None, 'bucket':bucket_name}) def get_obj_layout(self, key): 
path = '/'", "port, is_secure): self.host = host self.port = port self.is_secure =", "class RTestJSONSerialize(json.JSONEncoder): def default(self, obj): if isinstance(obj, (list, dict, str,", "if step == 'check' or step == 'test': self.do_check =", "RPlacementTarget(plid, self.placement_targets[plid]) except: pass return None def get_default_placement(self): return get_placement_target(self.zone_params.default_placement)", "setattr(self, e, j[e]) def save(self): suite.write_test_data(self) def load(self): suite.read_test_data(self) for", "self.conn = connection def get_resource(self, path, params): r = _make_admin_request(self.conn,", "test): k = Key(self.config_bucket) k.key = 'tests/' + test._name s", "= '' if placement_rule == '': try: # new style", "read_meta_key(self, key): return self.get_resource('/admin/metadata', {'key': key}) def get_bucket_entrypoint(self, bucket_name): return", "in dir(self): if x.startswith('r_'): attrs[x] = getattr(self, x) return json.dumps(attrs,", "self.prepare() self.save() if suite.is_checking(): self.load() self.check() def read_config(fp): config =", "= self.config cur[name] = bunch.Bunch() for var in str_config_opts: try:", "None self.rtests = [] self.do_preparing = False self.do_check = False", "result = self.read_meta_key('bucket.instance:' + bucket_name + \":\" + bucket_id) return", "get_bucket(self, name): bucket = self.get_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:',", "cur[name][var] = cfg.getboolean(section, var) except ConfigParser.NoOptionError: pass print json.dumps(self.config) rgw_conf", "test): k = Key(self.config_bucket) k.key = 'tests/' + test._name k.set_contents_from_string(test.to_json())", "t): self.rtests.append(t) def write_test_data(self, test): k = Key(self.config_bucket) k.key =", "in j: setattr(self, e, j[e]) def save(self): suite.write_test_data(self) def load(self):", "= name self.bucket_prefix = bucket_prefix self.zone = zone self.config_bucket =", "path, params): r = _make_admin_request(self.conn, \"GET\", path, params) if r.status", "__init__(self, access_key, secret_key, host, port, is_secure): self.host = host self.port", "= zone self.config_bucket = None self.rtests = [] self.do_preparing =", "def check_bucket_index(self, bucket_name): return self.get_resource('/admin/bucket',{'index' : None, 'bucket':bucket_name}) def get_obj_layout(self,", "return pickle.loads(str(d['__pickle'])) return d class RPlacementRule: def __init__(self, rule): r", "self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf')) def get_bucket_name(self, suffix): return self.bucket_prefix + '-'", "self.r_buckets: yield rb def init(self): pass def prepare(self): pass def", "self.port = port self.is_secure = is_secure self.conn = boto.connect_s3( aws_access_key_id", "placement_rule): return self.storage_classes.get(placement_rule.storage_class).data_pool class RZone: def __init__(self, conn): self.conn =", "+ 1 bucket_name = suite.get_bucket_name(self._name + '.' 
+ str(bid)) bucket_name", "placement_id if placement_id is None or placement_id == '': print", "raise boto.exception.S3ResponseError(r.status, r.reason) return bunch.bunchify(json.loads(r.read())) def read_meta_key(self, key): return self.get_resource('/admin/metadata',", "self.placement_targets = {} for e in self.zone_params.placement_pools: self.placement_targets[e.key] = e.val", "= self.bucket_info.bucket.pool except: # new style explicit pool explicit_pool =", "self.zone = zone self.config_bucket = None self.rtests = [] self.do_preparing", "or step == 'test': self.do_check = True self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf'))", "= connection def get_resource(self, path, params): r = _make_admin_request(self.conn, \"GET\",", "self.bucket_info.bucket.explicit_placement.data_pool if explicit_pool is not None and explicit_pool != '':", "'bucket_info:', bucket_info return RBucket(self, bucket, bucket_info) def create_raw_bucket(self, name): return", "zone, bucket, bucket_info): self.zone = zone self.bucket = bucket self.name", "= RPlacementRule(self.bucket_info.placement_rule) self.placement_target = self.zone.get_placement_target(self.bucket_info.placement_rule) except: pass def get_data_pool(self): try:", "try: placement_rule = obj_layout.manifest.tail_placement.placement_rule except: placement_rule = '' if placement_rule", "return {'__pickle': pickle.dumps(obj)} def rtest_decode_json(d): if '__pickle' in d: return", "bucket, bucket_info) def create_raw_bucket(self, name): return self.conn.regular.create_bucket(name) def get_raw_bucket(self, name):", "+ test._name k.set_contents_from_string(test.to_json()) def read_test_data(self, test): k = Key(self.config_bucket) k.key", "self.get_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:', bucket_info return RBucket(self, bucket,", "pickle import bunch import yaml import ConfigParser import rados from", "for e in j: setattr(self, e, j[e]) def save(self): suite.write_test_data(self)", "cfg.getint(section, var) except ConfigParser.NoOptionError: pass for var in bool_config_opts: try:", "= self.rgw_rest_admin.get_zone_params() self.placement_targets = {} for e in self.zone_params.placement_pools: self.placement_targets[e.key]", "RZone(conn) self.suite = RSuite('ragweed', self.bucket_prefix, self.zone, os.environ['RAGWEED_STAGES']) try: self.ceph_conf =", "in self.r_buckets: suite.zone.refresh_rbucket(rb) def test(self): suite.register_test(self) if suite.is_preparing(): self.prepare() self.save()", "object_hook=rtest_decode_json) for e in j: setattr(self, e, j[e]) def save(self):", "= cfg.get(section, var) except ConfigParser.NoOptionError: pass for var in int_config_opts:", "RBucket(self, bucket, bucket_info) def get_bucket(self, name): bucket = self.get_raw_bucket(name) bucket_info", "{} for e in self.zone_params.placement_pools: self.placement_targets[e.key] = e.val print 'zone_params:',", "self.host = host self.port = port self.is_secure = is_secure self.conn", "obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool except: pass try: # old style return obj_layout.manifest.tail_bucket.pool except:", "zone, suite_step): self.name = name self.bucket_prefix = bucket_prefix self.zone =", "config): if hasattr(config, 'storage_classes'): self.storage_classes = config.storage_classes else: try: self.storage_classes", "bucket_info) def get_bucket(self, name): bucket = self.get_raw_bucket(name) bucket_info = 
self.rgw_rest_admin.get_bucket_instance_info(bucket.name)", "and explicit_pool != '': return explicit_pool return self.placement_target.get_data_pool(self.placement_rule) def get_tail_pool(self,", "0) return sc def get_all(self): for (name, _) in self.storage_classes.iteritems():", "self.storage_class = 'STANDARD' class RBucket: def __init__(self, zone, bucket, bucket_info):", "section_type = '' name = section self.config[name] = bunch.Bunch() cur", "except: raise RuntimeError( 'ceph_conf is missing under the [rados] section", "storage_class: storage_class = 'STANDARD' sc = self.storage_classes[storage_class] except: eq('could not", "= getattr(self, x) return json.dumps(attrs, cls=RTestJSONSerialize) def from_json(self, s): j", "j: setattr(self, e, j[e]) def save(self): suite.write_test_data(self) def load(self): suite.read_test_data(self)", "{} for x in dir(self): if x.startswith('r_'): attrs[x] = getattr(self,", "pass pr = RPlacementRule(placement_rule) return self.placement_target.get_data_pool(pr) class RStorageClasses: def __init__(self,", "key.version_id is not None: params['versionId'] = key.version_id print params return", "self.config[name] = bunch.Bunch() cur = self.config cur[name] = bunch.Bunch() for", "d[k] return None class RagweedEnv: def __init__(self): self.config = bunch.Bunch()", "return self.get_resource(path, params) def get_zone_params(self): return self.get_resource('/admin/config', {'type': 'zone'}) class", "name): bucket = self.create_raw_bucket(name) bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name) print 'bucket_info:', bucket_info", "yield rb def init(self): pass def prepare(self): pass def check(self):", "'ragweed' conn = bunch.Bunch() for (k, u) in self.config.user.iteritems(): conn[k]", "print \"rados pool>\", pool def setup_module(): global ragweed_env global suite", "from .reqs import _make_admin_request ragweed_env = None suite = None", "bucket_id) return result.data.bucket_info def check_bucket_index(self, bucket_name): return self.get_resource('/admin/bucket',{'index' : None,", "str(bid)) bucket_name = bucket_name.replace(\"_\", \"-\") rb = suite.zone.create_bucket(bucket_name) self.r_buckets.append(rb) return", "return self.get_resource('/admin/metadata', {'key': key}) def get_bucket_entrypoint(self, bucket_name): return self.read_meta_key('bucket:' +", "+ bucket_name) def get_bucket_instance_info(self, bucket_name, bucket_id = None): if not", "style return obj_layout.manifest.tail_bucket.pool except: pass pr = RPlacementRule(placement_rule) return self.placement_target.get_data_pool(pr)", "RStorageClasses: def __init__(self, config): if hasattr(config, 'storage_classes'): self.storage_classes = config.storage_classes", "= config.storage_classes else: try: self.storage_classes = bunch.bunchify({ 'STANDARD': { 'data_pool':", "= ConfigParser.RawConfigParser() try: path = os.environ['RAGWEED_CONF'] except KeyError: raise RuntimeError(", "name): return self.conn.regular.get_bucket(name) def refresh_rbucket(self, rbucket): rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name) rbucket.bucket_info", "step == 'test': self.do_check = True self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf')) def", "import attr from nose.tools import eq_ as eq from .reqs", "= secret_key, host=host, port=port, is_secure=is_secure, calling_format = boto.s3.connection.OrdinaryCallingFormat(), ) def", "= boto.s3.connection.OrdinaryCallingFormat(), ) def create_bucket(self, name): return self.conn.create_bucket(name) def 
get_bucket(self,", "== 2): self.storage_class=r[1] else: self.storage_class = 'STANDARD' class RBucket: def", "e.val print 'zone_params:', self.zone_params def get_placement_target(self, placement_id): plid = placement_id", "params): r = _make_admin_request(self.conn, \"GET\", path, params) if r.status !=", "= config.data_extra_pool self.storage_classes = RStorageClasses(config) if not self.data_extra_pool: self.data_extra_pool =", "try: self.ceph_conf = self.config.rados.ceph_conf except: raise RuntimeError( 'ceph_conf is missing", "self.storage_classes[storage_class] except: eq('could not find storage class ' + storage_class,", "with file(path) as f: cfg.readfp(f) for section in cfg.sections(): try:", "pickle.loads(str(d['__pickle'])) return d class RPlacementRule: def __init__(self, rule): r =", "self.placement_id = r[0] if (len(r) == 2): self.storage_class=r[1] else: self.storage_class", "in self.r_buckets: yield rb def init(self): pass def prepare(self): pass", "= bunch.Bunch() for (k, u) in self.config.user.iteritems(): conn[k] = RGWConnection(u.access_key,", "def write_test_data(self, test): k = Key(self.config_bucket) k.key = 'tests/' +", "var) except ConfigParser.NoOptionError: pass for var in bool_config_opts: try: cur[name][var]", "save(self): suite.write_test_data(self) def load(self): suite.read_test_data(self) for rb in self.r_buckets: suite.zone.refresh_rbucket(rb)", "environment ' + 'variable RAGWEED_CONF to a config file.', )", "placement_id): plid = placement_id if placement_id is None or placement_id", "boto.s3.key import Key from nose.plugins.attrib import attr from nose.tools import", "if r.status != 200: raise boto.exception.S3ResponseError(r.status, r.reason) return bunch.bunchify(json.loads(r.read())) def", "file(path) as f: cfg.readfp(f) for section in cfg.sections(): try: (section_type,", "type(None))): return JSONEncoder.default(self, obj) return {'__pickle': pickle.dumps(obj)} def rtest_decode_json(d): if", "new style explicit pool explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool if explicit_pool is", "def get_bucket(self, name, validate=True): return self.conn.get_bucket(name, validate=validate) class RGWRESTAdmin: def", "return explicit_pool return self.placement_target.get_data_pool(self.placement_rule) def get_tail_pool(self, obj_layout): try: placement_rule =", "return self.bucket_prefix + '-' + suffix def register_test(self, t): self.rtests.append(t)", "'': try: # new style return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool except: pass try:", "self.read_meta_key('bucket.instance:' + bucket_name + \":\" + bucket_id) return result.data.bucket_info def", "self.check() def read_config(fp): config = bunch.Bunch() g = yaml.safe_load_all(fp) for", "new in g: print bunch.bunchify(new) config.update(bunch.bunchify(new)) return config str_config_opts =", "self.name = bucket.name self.bucket_info = bucket_info try: self.placement_rule = RPlacementRule(self.bucket_info.placement_rule)", "obj): if isinstance(obj, (list, dict, str, unicode, int, float, bool,", "get_default_placement(self): return get_placement_target(self.zone_params.default_placement) def create_bucket(self, name): bucket = self.create_raw_bucket(name) bucket_info", "== '': try: # new style return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool except: pass", "to_json(self): attrs = {} for x in dir(self): if x.startswith('r_'):", "in bool_config_opts: try: cur[name][var] = cfg.getboolean(section, var) except 
ConfigParser.NoOptionError: pass", "for pool in pools: print \"rados pool>\", pool def setup_module():", "config.index_pool self.data_extra_pool = config.data_extra_pool self.storage_classes = RStorageClasses(config) if not self.data_extra_pool:", "return JSONEncoder.default(self, obj) return {'__pickle': pickle.dumps(obj)} def rtest_decode_json(d): if '__pickle'", "pass def get_data_pool(self): try: # old style explicit pool explicit_pool", "def __init__(self, config): if hasattr(config, 'storage_classes'): self.storage_classes = config.storage_classes else:", "{'type': 'zone'}) class RSuite: def __init__(self, name, bucket_prefix, zone, suite_step):", "rb def init(self): pass def prepare(self): pass def check(self): pass", "inspect import pickle import bunch import yaml import ConfigParser import", "<gh_stars>0 import sys import os import boto import boto.s3.connection import", "import json import inspect import pickle import bunch import yaml", "suite.read_test_data(self) for rb in self.r_buckets: suite.zone.refresh_rbucket(rb) def test(self): suite.register_test(self) if", "e in j: setattr(self, e, j[e]) def save(self): suite.write_test_data(self) def", "self.bucket_prefix = bucket_prefix self.zone = zone self.config_bucket = None self.rtests", "json.loads(s, object_hook=rtest_decode_json) for e in j: setattr(self, e, j[e]) def", "d.has_key(k): return d[k] return None class RagweedEnv: def __init__(self): self.config", "k.key = 'tests/' + test._name k.set_contents_from_string(test.to_json()) def read_test_data(self, test): k", "= True self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf')) def get_bucket_name(self, suffix): return self.bucket_prefix", "if placement_rule == '': try: # new style return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool", "None suite = None class RGWConnection: def __init__(self, access_key, secret_key,", "+ storage_class, 0) return sc def get_all(self): for (name, _)", "params) def get_zone_params(self): return self.get_resource('/admin/config', {'type': 'zone'}) class RSuite: def", "return self.conn.regular.get_bucket(name) def refresh_rbucket(self, rbucket): rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name) rbucket.bucket_info =", "'To run tests, point environment ' + 'variable RAGWEED_CONF to", "import eq_ as eq from .reqs import _make_admin_request ragweed_env =", "self.data_extra_pool: self.data_extra_pool = self.storage_classes.get_data_pool('STANDARD') def get_data_pool(self, placement_rule): return self.storage_classes.get(placement_rule.storage_class).data_pool class", "self.storage_classes.get(placement_rule.storage_class).data_pool class RZone: def __init__(self, conn): self.conn = conn self.rgw_rest_admin", "None class RagweedEnv: def __init__(self): self.config = bunch.Bunch() cfg =", "self.do_preparing = False self.do_check = False for step in suite_step.split(','):", "(section_type, name) = section.split(None, 1) if not self.config.has_key(section_type): self.config[section_type] =", "return self.conn.get_bucket(name, validate=validate) class RGWRESTAdmin: def __init__(self, connection): self.conn =", "for var in bool_config_opts: try: cur[name][var] = cfg.getboolean(section, var) except", "= self.rados.list_pools() for pool in pools: print \"rados pool>\", pool", "'zone_params=', self.zone_params plid = self.zone_params.default_placement try: return RPlacementTarget(plid, self.placement_targets[plid]) except:", "\"GET\", path, params) if r.status != 200: raise 
# ragweed: RGW test harness -- S3 connections, zone/bucket helpers, the RTest
# base class, and the RagweedEnv environment bootstrap (Python 2 / boto / nose).
import os
import boto
import boto.s3.connection
import json
import inspect
import pickle
import bunch
import yaml
import ConfigParser
import rados

from boto.s3.key import Key
from nose.plugins.attrib import attr
from nose.tools import eq_ as eq

from .reqs import _make_admin_request

ragweed_env = None
suite = None


class RGWConnection:
    def __init__(self, access_key, secret_key, host, port, is_secure):
        self.host = host
        self.port = port
        self.is_secure = is_secure
        self.conn = boto.connect_s3(
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            host=host,
            port=port,
            is_secure=is_secure,
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
            )

    def create_bucket(self, name):
        return self.conn.create_bucket(name)

    def get_bucket(self, name, validate=True):
        return self.conn.get_bucket(name, validate=validate)


class RGWRESTAdmin:
    def __init__(self, connection):
        self.conn = connection

    def get_resource(self, path, params):
        r = _make_admin_request(self.conn, "GET", path, params)
        if r.status != 200:
            raise boto.exception.S3ResponseError(r.status, r.reason)
        return bunch.bunchify(json.loads(r.read()))

    def read_meta_key(self, key):
        return self.get_resource('/admin/metadata', {'key': key})

    def get_bucket_entrypoint(self, bucket_name):
        return self.read_meta_key('bucket:' + bucket_name)

    def get_bucket_instance_info(self, bucket_name, bucket_id=None):
        if not bucket_id:
            ep = self.get_bucket_entrypoint(bucket_name)
            print ep
            bucket_id = ep.data.bucket.bucket_id
        result = self.read_meta_key('bucket.instance:' + bucket_name + ":" + bucket_id)
        return result.data.bucket_info

    def check_bucket_index(self, bucket_name):
        return self.get_resource('/admin/bucket', {'index': None, 'bucket': bucket_name})

    def get_obj_layout(self, key):
        path = '/' + key.bucket.name + '/' + key.name
        params = {'layout': None}
        if key.version_id is not None:
            params['versionId'] = key.version_id
        print params
        return self.get_resource(path, params)

    def get_zone_params(self):
        return self.get_resource('/admin/config', {'type': 'zone'})


class RSuite:
    def __init__(self, name, bucket_prefix, zone, suite_step):
        self.name = name
        self.bucket_prefix = bucket_prefix
        self.zone = zone
        self.config_bucket = None
        self.rtests = []
        self.do_preparing = False
        self.do_check = False
        for step in suite_step.split(','):
            if step == 'prepare':
                self.do_preparing = True
                self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf'))
            if step == 'check' or step == 'test':
                self.do_check = True
                self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf'))

    def get_bucket_name(self, suffix):
        return self.bucket_prefix + '-' + suffix

    def register_test(self, t):
        self.rtests.append(t)

    def write_test_data(self, test):
        k = Key(self.config_bucket)
        k.key = 'tests/' + test._name
        k.set_contents_from_string(test.to_json())

    def read_test_data(self, test):
        k = Key(self.config_bucket)
        k.key = 'tests/' + test._name
        s = k.get_contents_as_string()
        print 'read_test_data=', s
        test.from_json(s)

    def is_preparing(self):
        return self.do_preparing

    def is_checking(self):
        return self.do_check


class RTestJSONSerialize(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):
            return JSONEncoder.default(self, obj)
        return {'__pickle': pickle.dumps(obj)}


def rtest_decode_json(d):
    if '__pickle' in d:
        return pickle.loads(str(d['__pickle']))
    return d


class RPlacementRule:
    def __init__(self, rule):
        r = rule.split('/', 1)
        self.placement_id = r[0]
        if (len(r) == 2):
            self.storage_class = r[1]
        else:
            self.storage_class = 'STANDARD'


class RBucket:
    def __init__(self, zone, bucket, bucket_info):
        self.zone = zone
        self.bucket = bucket
        self.name = bucket.name
        self.bucket_info = bucket_info
        try:
            self.placement_rule = RPlacementRule(self.bucket_info.placement_rule)
            self.placement_target = self.zone.get_placement_target(self.bucket_info.placement_rule)
        except:
            pass

    def get_data_pool(self):
        try:
            # old style explicit pool
            explicit_pool = self.bucket_info.bucket.pool
        except:
            # new style explicit pool
            explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool
        if explicit_pool is not None and explicit_pool != '':
            return explicit_pool
        return self.placement_target.get_data_pool(self.placement_rule)

    def get_tail_pool(self, obj_layout):
        try:
            placement_rule = obj_layout.manifest.tail_placement.placement_rule
        except:
            placement_rule = ''
        if placement_rule == '':
            try:
                # new style
                return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool
            except:
                pass
            try:
                # old style
                return obj_layout.manifest.tail_bucket.pool
            except:
                pass
        pr = RPlacementRule(placement_rule)
        return self.placement_target.get_data_pool(pr)


class RStorageClasses:
    def __init__(self, config):
        if hasattr(config, 'storage_classes'):
            self.storage_classes = config.storage_classes
        else:
            try:
                self.storage_classes = bunch.bunchify({'STANDARD': {'data_pool': config.data_pool}})
            except:
                self.storage_classes = None
                pass

    def get(self, storage_class):
        assert(self.storage_classes != None)
        try:
            if not storage_class:
                storage_class = 'STANDARD'
            sc = self.storage_classes[storage_class]
        except:
            eq('could not find storage class ' + storage_class, 0)
        return sc

    def get_all(self):
        for (name, _) in self.storage_classes.iteritems():
            yield name


class RPlacementTarget:
    def __init__(self, name, config):
        self.name = name
        self.index_pool = config.index_pool
        self.data_extra_pool = config.data_extra_pool
        self.storage_classes = RStorageClasses(config)
        if not self.data_extra_pool:
            self.data_extra_pool = self.storage_classes.get_data_pool('STANDARD')

    def get_data_pool(self, placement_rule):
        return self.storage_classes.get(placement_rule.storage_class).data_pool


class RZone:
    def __init__(self, conn):
        self.conn = conn
        self.rgw_rest_admin = RGWRESTAdmin(self.conn.system)
        self.zone_params = self.rgw_rest_admin.get_zone_params()
        self.placement_targets = {}
        for e in self.zone_params.placement_pools:
            self.placement_targets[e.key] = e.val
        print 'zone_params:', self.zone_params

    def get_placement_target(self, placement_id):
        plid = placement_id
        if placement_id is None or placement_id == '':
            print 'zone_params=', self.zone_params
            plid = self.zone_params.default_placement
        try:
            return RPlacementTarget(plid, self.placement_targets[plid])
        except:
            pass
        return None

    def get_default_placement(self):
        return get_placement_target(self.zone_params.default_placement)

    def create_bucket(self, name):
        bucket = self.create_raw_bucket(name)
        bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
        print 'bucket_info:', bucket_info
        return RBucket(self, bucket, bucket_info)

    def get_bucket(self, name):
        bucket = self.get_raw_bucket(name)
        bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
        print 'bucket_info:', bucket_info
        return RBucket(self, bucket, bucket_info)

    def create_raw_bucket(self, name):
        return self.conn.regular.create_bucket(name)

    def get_raw_bucket(self, name):
        return self.conn.regular.get_bucket(name)

    def refresh_rbucket(self, rbucket):
        rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name)
        rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name)


class RTest:
    def __init__(self):
        self._name = self.__class__.__name__
        self.r_buckets = []
        self.init()

    def create_bucket(self):
        bid = len(self.r_buckets) + 1
        bucket_name = suite.get_bucket_name(self._name + '.' + str(bid))
        bucket_name = bucket_name.replace("_", "-")
        rb = suite.zone.create_bucket(bucket_name)
        self.r_buckets.append(rb)
        return rb

    def get_buckets(self):
        for rb in self.r_buckets:
            yield rb

    def init(self):
        pass

    def prepare(self):
        pass

    def check(self):
        pass

    def to_json(self):
        attrs = {}
        for x in dir(self):
            if x.startswith('r_'):
                attrs[x] = getattr(self, x)
        return json.dumps(attrs, cls=RTestJSONSerialize)

    def from_json(self, s):
        j = json.loads(s, object_hook=rtest_decode_json)
        for e in j:
            setattr(self, e, j[e])

    def save(self):
        suite.write_test_data(self)

    def load(self):
        suite.read_test_data(self)
        for rb in self.r_buckets:
            suite.zone.refresh_rbucket(rb)

    def test(self):
        suite.register_test(self)
        if suite.is_preparing():
            self.prepare()
            self.save()
        if suite.is_checking():
            self.load()
            self.check()


def read_config(fp):
    config = bunch.Bunch()
    g = yaml.safe_load_all(fp)
    for new in g:
        print bunch.bunchify(new)
        config.update(bunch.bunchify(new))
    return config


str_config_opts = [
    'user_id',
    'access_key',
    'secret_key',
    'host',
    'ceph_conf',
    'bucket_prefix',
]

int_config_opts = [
    'port',
]

bool_config_opts = [
    'is_secure',
]


def dict_find(d, k):
    if d.has_key(k):
        return d[k]
    return None


class RagweedEnv:
    def __init__(self):
        self.config = bunch.Bunch()

        cfg = ConfigParser.RawConfigParser()
        try:
            path = os.environ['RAGWEED_CONF']
        except KeyError:
            raise RuntimeError(
                'To run tests, point environment ' +
                'variable RAGWEED_CONF to a config file.',
                )
        with file(path) as f:
            cfg.readfp(f)

        for section in cfg.sections():
            try:
                (section_type, name) = section.split(None, 1)
                if not self.config.has_key(section_type):
                    self.config[section_type] = bunch.Bunch()
                self.config[section_type][name] = bunch.Bunch()
                cur = self.config[section_type]
            except ValueError:
                section_type = ''
                name = section
                self.config[name] = bunch.Bunch()
                cur = self.config
            cur[name] = bunch.Bunch()

            for var in str_config_opts:
                try:
                    cur[name][var] = cfg.get(section, var)
                except ConfigParser.NoOptionError:
                    pass

            for var in int_config_opts:
                try:
                    cur[name][var] = cfg.getint(section, var)
                except ConfigParser.NoOptionError:
                    pass

            for var in bool_config_opts:
                try:
                    cur[name][var] = cfg.getboolean(section, var)
                except ConfigParser.NoOptionError:
                    pass

        print json.dumps(self.config)

        rgw_conf = self.config.rgw

        try:
            self.bucket_prefix = rgw_conf.bucket_prefix
        except:
            self.bucket_prefix = 'ragweed'

        conn = bunch.Bunch()
        for (k, u) in self.config.user.iteritems():
            conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host,
                                    dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure'))

        self.zone = RZone(conn)
        self.suite = RSuite('ragweed', self.bucket_prefix, self.zone, os.environ['RAGWEED_STAGES'])

        try:
            self.ceph_conf = self.config.rados.ceph_conf
        except:
            raise RuntimeError(
                'ceph_conf is missing under the [rados] section in ' + os.environ['RAGWEED_CONF']
                )

        self.rados = rados.Rados(conffile=self.ceph_conf)
        self.rados.connect()

        pools = self.rados.list_pools()
        for pool in pools:
            print "rados pool>", pool


def setup_module():
    global ragweed_env
    global suite

    ragweed_env = RagweedEnv()
    suite = ragweed_env.suite
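For orientation, here is a minimal sketch of how a concrete check could sit on top of this harness: a subclass of RTest creates its buckets and writes state during prepare(), and re-reads the restored r_* attributes during check(). The class name, object key, and payload below are illustrative assumptions, not part of the harness itself.

class MyObjRoundTrip(RTest):
    # Hypothetical example test: the name, key, and payload are placeholders.
    def prepare(self):
        # 'prepare' stage: create a bucket, write an object, and stash the
        # bucket name in an r_* attribute so save()/to_json() persists it.
        self.r_bucket_name = self.create_bucket().name
        k = Key(suite.zone.get_raw_bucket(self.r_bucket_name))
        k.key = 'obj-1'
        k.set_contents_from_string('hello')

    def check(self):
        # 'check'/'test' stage: load() has restored r_bucket_name; verify the object.
        k = Key(suite.zone.get_raw_bucket(self.r_bucket_name))
        k.key = 'obj-1'
        eq(k.get_contents_as_string(), 'hello')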
[ "<filename>opensteer/teams/admin.py from django.contrib import admin from opensteer.teams.models import Team, Member", "from django.contrib import admin from opensteer.teams.models import Team, Member admin.site.register(Team)", "django.contrib import admin from opensteer.teams.models import Team, Member admin.site.register(Team) admin.site.register(Member)" ]
[ "\"current_center\": [20, 0], \"expect\": True}, \"left\": {\"prev_center\": [10, 0], \"current_center\":", "is_intersect # pylint:disable=unexpected-keyword-arg class TestCheckDirection: def test_true(self): \"\"\"Test true case.\"\"\"", "[0, 0], \"current_center\": [10, 0], \"expect\": False}, # This is", "0], \"expect\": True}, \"top\": {\"prev_center\": [0, 10], \"current_center\": [0, 0],", "10], \"expect\": False}, # This is top. \"bottom\": {\"prev_center\": [0,", "10], \"current_center\": [0, 0], \"expect\": False}, } for direction_str, args", "{\"prev_center\": [0, 10], \"current_center\": [0, 0]}, # Top. {\"prev_center\": [0,", "directions = { \"right\": {\"prev_center\": [0, 0], \"current_center\": [20, 0],", "if always return true when direction is set None.\"\"\" args", "= check_direction(**arg, direction=None) assert result == True class TestIsIntersect: def", "direction_config, is_intersect # pylint:disable=unexpected-keyword-arg class TestCheckDirection: def test_true(self): \"\"\"Test true", "\"current_center\": [0, 10], \"expect\": False}, # This is top. \"bottom\":", "src.utils import check_direction, direction_config, is_intersect # pylint:disable=unexpected-keyword-arg class TestCheckDirection: def", "return true when direction is set None.\"\"\" args = [", "args = [ {\"prev_center\": [0, 0], \"current_center\": [0, 0]}, #", "\"top\": {\"prev_center\": [0, 10], \"current_center\": [0, 0], \"expect\": True}, \"bottom\":", "# Bottom. ] for arg in args: # If the", "test_false(self): \"\"\"Test false case.\"\"\" args = {\"A\": [10, 0], \"B\":", "\"left\": {\"prev_center\": [0, 0], \"current_center\": [10, 0], \"expect\": False}, #", "0], \"current_center\": [0, 0], \"expect\": False}, # This is right.", "result == expect def test_direction_none(self): \"\"\"Check if always return true", "= { \"right\": {\"prev_center\": [0, 0], \"current_center\": [0, 0], \"expect\":", "Right {\"prev_center\": [10, 0], \"current_center\": [0, 0]}, # Left. {\"prev_center\":", "0], \"current_center\": [0, 0]}, # Left. {\"prev_center\": [0, 10], \"current_center\":", "= is_intersect(**args) assert result == True def test_false(self): \"\"\"Test false", "\"expect\": False}, # This is top. \"bottom\": {\"prev_center\": [0, 10],", "[0, 0]}, # Top. {\"prev_center\": [0, 0], \"current_center\": [0, 10]},", "\"current_center\": [0, 0]}, # Left. {\"prev_center\": [0, 10], \"current_center\": [0,", "{\"prev_center\": [0, 0], \"current_center\": [10, 0], \"expect\": False}, # This", "is top. \"bottom\": {\"prev_center\": [0, 10], \"current_center\": [0, 0], \"expect\":", "True class TestIsIntersect: def test_true(self): \"\"\"Test true case.\"\"\" args =", "[10, 0], \"expect\": False}, # This is bottom. 
\"top\": {\"prev_center\":", "10], \"D\": [30, 0]} result = is_intersect(**args) assert result ==", "[0, 10], \"current_center\": [0, 0], \"expect\": False}, } for direction_str,", "direction=direction_config[direction_str]) assert result == expect def test_direction_none(self): \"\"\"Check if always", "def test_false(self): \"\"\"Test false case.\"\"\" args = {\"A\": [10, 0],", "0], \"expect\": False}, } for direction_str, args in directions.items(): expect", "args in directions.items(): expect = args.pop(\"expect\") result = check_direction(**args, direction=direction_config[direction_str])", "# pylint:disable=unexpected-keyword-arg class TestCheckDirection: def test_true(self): \"\"\"Test true case.\"\"\" directions", "10], \"D\": [0, 0]} result = is_intersect(**args) assert result ==", "result = check_direction(**arg, direction=None) assert result == True class TestIsIntersect:", "# This is top. \"bottom\": {\"prev_center\": [0, 10], \"current_center\": [0,", "This is bottom. \"top\": {\"prev_center\": [0, 0], \"current_center\": [0, 10],", "0], \"current_center\": [20, 0], \"expect\": True}, \"left\": {\"prev_center\": [10, 0],", "TestIsIntersect: def test_true(self): \"\"\"Test true case.\"\"\" args = {\"A\": [10,", "{\"prev_center\": [0, 0], \"current_center\": [20, 0], \"expect\": True}, \"left\": {\"prev_center\":", "0], \"expect\": False}, # This is right. \"left\": {\"prev_center\": [0,", "0], \"current_center\": [0, 10], \"expect\": False}, # This is top.", "None, always return True. result = check_direction(**arg, direction=None) assert result", "{\"prev_center\": [0, 0], \"current_center\": [0, 10]}, # Bottom. ] for", "0], \"B\": [10, 30], \"C\": [0, 10], \"D\": [0, 0]}", "[0, 10], \"current_center\": [0, 0], \"expect\": True}, \"bottom\": {\"prev_center\": [0,", "{\"prev_center\": [0, 10], \"current_center\": [0, 0], \"expect\": True}, \"bottom\": {\"prev_center\":", "when direction is set None.\"\"\" args = [ {\"prev_center\": [0,", "assert result == True class TestIsIntersect: def test_true(self): \"\"\"Test true", "[0, 0], \"current_center\": [10, 0]}, # Right {\"prev_center\": [10, 0],", "30], \"C\": [0, 10], \"D\": [0, 0]} result = is_intersect(**args)", "0], \"current_center\": [0, 0]}, # No movement. {\"prev_center\": [0, 0],", "class TestCheckDirection: def test_true(self): \"\"\"Test true case.\"\"\" directions = {", "No movement. {\"prev_center\": [0, 0], \"current_center\": [10, 0]}, # Right", "in args: # If the direction is None, always return", "def test_direction_none(self): \"\"\"Check if always return true when direction is", "{\"prev_center\": [10, 0], \"current_center\": [0, 0]}, # Left. {\"prev_center\": [0,", "= args.pop(\"expect\") result = check_direction(**args, direction=direction_config[direction_str]) assert result == expect", "[0, 10], \"current_center\": [0, 0]}, # Top. {\"prev_center\": [0, 0],", "[20, 0], \"expect\": True}, \"left\": {\"prev_center\": [10, 0], \"current_center\": [0,", "True}, \"bottom\": {\"prev_center\": [0, 0], \"current_center\": [0, 10], \"expect\": True},", "{\"prev_center\": [0, 0], \"current_center\": [0, 10], \"expect\": True}, } for", "\"expect\": True}, \"left\": {\"prev_center\": [10, 0], \"current_center\": [0, 0], \"expect\":", "[10, 0], \"B\": [10, 30], \"C\": [0, 10], \"D\": [0,", "\"\"\"Test true case.\"\"\" directions = { \"right\": {\"prev_center\": [0, 0],", "# Left. 
{\"prev_center\": [0, 10], \"current_center\": [0, 0]}, # Top.", "is_intersect(**args) assert result == True def test_false(self): \"\"\"Test false case.\"\"\"", "\"B\": [10, 30], \"C\": [0, 10], \"D\": [30, 0]} result", "test_true(self): \"\"\"Test true case.\"\"\" directions = { \"right\": {\"prev_center\": [0,", "check_direction(**arg, direction=None) assert result == True class TestIsIntersect: def test_true(self):", "\"B\": [10, 30], \"C\": [0, 10], \"D\": [0, 0]} result", "# Right {\"prev_center\": [10, 0], \"current_center\": [0, 0]}, # Left.", "[0, 10], \"D\": [0, 0]} result = is_intersect(**args) assert result", "check_direction, direction_config, is_intersect # pylint:disable=unexpected-keyword-arg class TestCheckDirection: def test_true(self): \"\"\"Test", "\"\"\"Test false case.\"\"\" args = {\"A\": [10, 0], \"B\": [10,", "0], \"expect\": True}, \"left\": {\"prev_center\": [10, 0], \"current_center\": [0, 0],", "test_true(self): \"\"\"Test true case.\"\"\" args = {\"A\": [10, 0], \"B\":", "\"expect\": False}, # This is right. \"left\": {\"prev_center\": [0, 0],", "{ \"right\": {\"prev_center\": [0, 0], \"current_center\": [20, 0], \"expect\": True},", "\"current_center\": [0, 0]}, # No movement. {\"prev_center\": [0, 0], \"current_center\":", "[ {\"prev_center\": [0, 0], \"current_center\": [0, 0]}, # No movement.", "direction is None, always return True. result = check_direction(**arg, direction=None)", "return True. result = check_direction(**arg, direction=None) assert result == True", "result == True def test_false(self): \"\"\"Test false case.\"\"\" args =", "[0, 10]}, # Bottom. ] for arg in args: #", "result = check_direction(**args, direction=direction_config[direction_str]) assert result == expect def test_false(self):", "directions.items(): expect = args.pop(\"expect\") result = check_direction(**args, direction=direction_config[direction_str]) assert result", "TestCheckDirection: def test_true(self): \"\"\"Test true case.\"\"\" directions = { \"right\":", "\"expect\": False}, # This is bottom. \"top\": {\"prev_center\": [0, 0],", "0], \"B\": [10, 30], \"C\": [0, 10], \"D\": [30, 0]}", "\"bottom\": {\"prev_center\": [0, 10], \"current_center\": [0, 0], \"expect\": False}, }", "assert result == expect def test_direction_none(self): \"\"\"Check if always return", "[10, 0], \"current_center\": [0, 0]}, # Left. {\"prev_center\": [0, 10],", "[0, 10], \"D\": [30, 0]} result = is_intersect(**args) assert result", "0]} result = is_intersect(**args) assert result == True def test_false(self):", "\"\"\"Test false case.\"\"\" directions = { \"right\": {\"prev_center\": [0, 0],", "[10, 30], \"C\": [0, 10], \"D\": [30, 0]} result =", "\"bottom\": {\"prev_center\": [0, 0], \"current_center\": [0, 10], \"expect\": True}, }", "test_direction_none(self): \"\"\"Check if always return true when direction is set", "direction_str, args in directions.items(): expect = args.pop(\"expect\") result = check_direction(**args,", "This is right. \"left\": {\"prev_center\": [0, 0], \"current_center\": [10, 0],", "# This is bottom. \"top\": {\"prev_center\": [0, 0], \"current_center\": [0,", "is bottom. 
\"top\": {\"prev_center\": [0, 0], \"current_center\": [0, 10], \"expect\":", "is set None.\"\"\" args = [ {\"prev_center\": [0, 0], \"current_center\":", "arg in args: # If the direction is None, always", "\"current_center\": [0, 0], \"expect\": True}, \"top\": {\"prev_center\": [0, 10], \"current_center\":", "\"C\": [0, 10], \"D\": [0, 0]} result = is_intersect(**args) assert", "[0, 0], \"current_center\": [0, 0], \"expect\": False}, # This is", "\"top\": {\"prev_center\": [0, 0], \"current_center\": [0, 10], \"expect\": False}, #", "import check_direction, direction_config, is_intersect # pylint:disable=unexpected-keyword-arg class TestCheckDirection: def test_true(self):", "{ \"right\": {\"prev_center\": [0, 0], \"current_center\": [0, 0], \"expect\": False},", "[0, 0], \"expect\": False}, # This is right. \"left\": {\"prev_center\":", "result == True class TestIsIntersect: def test_true(self): \"\"\"Test true case.\"\"\"", "This is top. \"bottom\": {\"prev_center\": [0, 10], \"current_center\": [0, 0],", "set None.\"\"\" args = [ {\"prev_center\": [0, 0], \"current_center\": [0,", "[10, 30], \"C\": [0, 10], \"D\": [0, 0]} result =", "False}, # This is bottom. \"top\": {\"prev_center\": [0, 0], \"current_center\":", "args = {\"A\": [10, 0], \"B\": [10, 30], \"C\": [0,", "from src.utils import check_direction, direction_config, is_intersect # pylint:disable=unexpected-keyword-arg class TestCheckDirection:", "Top. {\"prev_center\": [0, 0], \"current_center\": [0, 10]}, # Bottom. ]", "args.pop(\"expect\") result = check_direction(**args, direction=direction_config[direction_str]) assert result == expect def", "10], \"current_center\": [0, 0]}, # Top. {\"prev_center\": [0, 0], \"current_center\":", "= {\"A\": [10, 0], \"B\": [10, 30], \"C\": [0, 10],", "\"D\": [30, 0]} result = is_intersect(**args) assert result == True", "{\"prev_center\": [0, 10], \"current_center\": [0, 0], \"expect\": False}, } for", "\"expect\": True}, \"top\": {\"prev_center\": [0, 10], \"current_center\": [0, 0], \"expect\":", "result = check_direction(**args, direction=direction_config[direction_str]) assert result == expect def test_direction_none(self):", "10]}, # Bottom. ] for arg in args: # If", "10], \"expect\": True}, } for direction_str, args in directions.items(): expect", "expect def test_false(self): \"\"\"Test false case.\"\"\" directions = { \"right\":", "True}, \"top\": {\"prev_center\": [0, 10], \"current_center\": [0, 0], \"expect\": True},", "\"expect\": True}, \"bottom\": {\"prev_center\": [0, 0], \"current_center\": [0, 10], \"expect\":", "false case.\"\"\" args = {\"A\": [10, 0], \"B\": [10, 30],", "0], \"expect\": False}, # This is bottom. \"top\": {\"prev_center\": [0,", "\"left\": {\"prev_center\": [10, 0], \"current_center\": [0, 0], \"expect\": True}, \"top\":", "\"current_center\": [0, 0], \"expect\": False}, # This is right. \"left\":", "is right. \"left\": {\"prev_center\": [0, 0], \"current_center\": [10, 0], \"expect\":", "[0, 0], \"current_center\": [0, 10], \"expect\": False}, # This is", "True def test_false(self): \"\"\"Test false case.\"\"\" args = {\"A\": [10,", "[0, 0], \"expect\": False}, } for direction_str, args in directions.items():", "\"current_center\": [0, 0], \"expect\": True}, \"bottom\": {\"prev_center\": [0, 0], \"current_center\":", "# No movement. 
{\"prev_center\": [0, 0], \"current_center\": [10, 0]}, #", "{\"prev_center\": [0, 0], \"current_center\": [0, 10], \"expect\": False}, # This", "\"D\": [0, 0]} result = is_intersect(**args) assert result == False", "expect def test_direction_none(self): \"\"\"Check if always return true when direction", "always return True. result = check_direction(**arg, direction=None) assert result ==", "case.\"\"\" directions = { \"right\": {\"prev_center\": [0, 0], \"current_center\": [0,", "False}, # This is top. \"bottom\": {\"prev_center\": [0, 10], \"current_center\":", "def test_true(self): \"\"\"Test true case.\"\"\" directions = { \"right\": {\"prev_center\":", "\"\"\"Check if always return true when direction is set None.\"\"\"", "10], \"current_center\": [0, 0], \"expect\": True}, \"bottom\": {\"prev_center\": [0, 0],", "direction=direction_config[direction_str]) assert result == expect def test_false(self): \"\"\"Test false case.\"\"\"", "result == expect def test_false(self): \"\"\"Test false case.\"\"\" directions =", "top. \"bottom\": {\"prev_center\": [0, 10], \"current_center\": [0, 0], \"expect\": False},", "False}, # This is right. \"left\": {\"prev_center\": [0, 0], \"current_center\":", "assert result == expect def test_false(self): \"\"\"Test false case.\"\"\" directions", "Bottom. ] for arg in args: # If the direction", "\"current_center\": [0, 10], \"expect\": True}, } for direction_str, args in", "case.\"\"\" args = {\"A\": [10, 0], \"B\": [10, 30], \"C\":", "\"\"\"Test true case.\"\"\" args = {\"A\": [10, 0], \"B\": [10,", "[0, 0], \"current_center\": [0, 10], \"expect\": True}, } for direction_str,", "= { \"right\": {\"prev_center\": [0, 0], \"current_center\": [20, 0], \"expect\":", "Left. {\"prev_center\": [0, 10], \"current_center\": [0, 0]}, # Top. {\"prev_center\":", "# Top. {\"prev_center\": [0, 0], \"current_center\": [0, 10]}, # Bottom.", "\"expect\": True}, } for direction_str, args in directions.items(): expect =", "always return true when direction is set None.\"\"\" args =", "direction is set None.\"\"\" args = [ {\"prev_center\": [0, 0],", "# If the direction is None, always return True. result", "direction=None) assert result == True class TestIsIntersect: def test_true(self): \"\"\"Test", "== True class TestIsIntersect: def test_true(self): \"\"\"Test true case.\"\"\" args", "\"right\": {\"prev_center\": [0, 0], \"current_center\": [20, 0], \"expect\": True}, \"left\":", "= check_direction(**args, direction=direction_config[direction_str]) assert result == expect def test_direction_none(self): \"\"\"Check", "for arg in args: # If the direction is None,", "0], \"current_center\": [0, 10], \"expect\": True}, } for direction_str, args", "{\"prev_center\": [0, 0], \"current_center\": [0, 0], \"expect\": False}, # This", "case.\"\"\" directions = { \"right\": {\"prev_center\": [0, 0], \"current_center\": [20,", "True}, } for direction_str, args in directions.items(): expect = args.pop(\"expect\")", "def test_true(self): \"\"\"Test true case.\"\"\" args = {\"A\": [10, 0],", "{\"prev_center\": [0, 0], \"current_center\": [0, 0]}, # No movement. {\"prev_center\":", "args: # If the direction is None, always return True.", "\"C\": [0, 10], \"D\": [30, 0]} result = is_intersect(**args) assert", "0]}, # Right {\"prev_center\": [10, 0], \"current_center\": [0, 0]}, #", "True}, \"left\": {\"prev_center\": [10, 0], \"current_center\": [0, 0], \"expect\": True},", "# This is right. 
\"left\": {\"prev_center\": [0, 0], \"current_center\": [10,", "0], \"current_center\": [10, 0]}, # Right {\"prev_center\": [10, 0], \"current_center\":", "\"current_center\": [10, 0]}, # Right {\"prev_center\": [10, 0], \"current_center\": [0,", "[0, 0], \"expect\": True}, \"top\": {\"prev_center\": [0, 10], \"current_center\": [0,", "] for arg in args: # If the direction is", "[0, 10], \"expect\": False}, # This is top. \"bottom\": {\"prev_center\":", "} for direction_str, args in directions.items(): expect = args.pop(\"expect\") result", "is None, always return True. result = check_direction(**arg, direction=None) assert", "false case.\"\"\" directions = { \"right\": {\"prev_center\": [0, 0], \"current_center\":", "expect = args.pop(\"expect\") result = check_direction(**args, direction=direction_config[direction_str]) assert result ==", "\"expect\": False}, } for direction_str, args in directions.items(): expect =", "check_direction(**args, direction=direction_config[direction_str]) assert result == expect def test_direction_none(self): \"\"\"Check if", "0]}, # No movement. {\"prev_center\": [0, 0], \"current_center\": [10, 0]},", "for direction_str, args in directions.items(): expect = args.pop(\"expect\") result =", "assert result == True def test_false(self): \"\"\"Test false case.\"\"\" args", "class TestIsIntersect: def test_true(self): \"\"\"Test true case.\"\"\" args = {\"A\":", "0]}, # Left. {\"prev_center\": [0, 10], \"current_center\": [0, 0]}, #", "\"right\": {\"prev_center\": [0, 0], \"current_center\": [0, 0], \"expect\": False}, #", "movement. {\"prev_center\": [0, 0], \"current_center\": [10, 0]}, # Right {\"prev_center\":", "{\"prev_center\": [0, 0], \"current_center\": [10, 0]}, # Right {\"prev_center\": [10,", "bottom. \"top\": {\"prev_center\": [0, 0], \"current_center\": [0, 10], \"expect\": False},", "None.\"\"\" args = [ {\"prev_center\": [0, 0], \"current_center\": [0, 0]},", "0]}, # Top. {\"prev_center\": [0, 0], \"current_center\": [0, 10]}, #", "True. result = check_direction(**arg, direction=None) assert result == True class", "= [ {\"prev_center\": [0, 0], \"current_center\": [0, 0]}, # No", "test_false(self): \"\"\"Test false case.\"\"\" directions = { \"right\": {\"prev_center\": [0,", "pylint:disable=unexpected-keyword-arg class TestCheckDirection: def test_true(self): \"\"\"Test true case.\"\"\" directions =", "<gh_stars>1-10 from src.utils import check_direction, direction_config, is_intersect # pylint:disable=unexpected-keyword-arg class", "{\"prev_center\": [10, 0], \"current_center\": [0, 0], \"expect\": True}, \"top\": {\"prev_center\":", "== True def test_false(self): \"\"\"Test false case.\"\"\" args = {\"A\":", "\"current_center\": [0, 0], \"expect\": False}, } for direction_str, args in", "[0, 0], \"expect\": True}, \"bottom\": {\"prev_center\": [0, 0], \"current_center\": [0,", "\"current_center\": [0, 0]}, # Top. {\"prev_center\": [0, 0], \"current_center\": [0,", "If the direction is None, always return True. 
result =", "in directions.items(): expect = args.pop(\"expect\") result = check_direction(**args, direction=direction_config[direction_str]) assert", "False}, } for direction_str, args in directions.items(): expect = args.pop(\"expect\")", "[0, 10], \"expect\": True}, } for direction_str, args in directions.items():", "check_direction(**args, direction=direction_config[direction_str]) assert result == expect def test_false(self): \"\"\"Test false", "{\"A\": [10, 0], \"B\": [10, 30], \"C\": [0, 10], \"D\":", "result = is_intersect(**args) assert result == True def test_false(self): \"\"\"Test", "[0, 0], \"current_center\": [20, 0], \"expect\": True}, \"left\": {\"prev_center\": [10,", "[10, 0]}, # Right {\"prev_center\": [10, 0], \"current_center\": [0, 0]},", "30], \"C\": [0, 10], \"D\": [30, 0]} result = is_intersect(**args)", "right. \"left\": {\"prev_center\": [0, 0], \"current_center\": [10, 0], \"expect\": False},", "0], \"current_center\": [0, 0], \"expect\": True}, \"top\": {\"prev_center\": [0, 10],", "0], \"current_center\": [10, 0], \"expect\": False}, # This is bottom.", "0], \"expect\": True}, \"bottom\": {\"prev_center\": [0, 0], \"current_center\": [0, 10],", "directions = { \"right\": {\"prev_center\": [0, 0], \"current_center\": [0, 0],", "true when direction is set None.\"\"\" args = [ {\"prev_center\":", "[0, 0]}, # Left. {\"prev_center\": [0, 10], \"current_center\": [0, 0]},", "the direction is None, always return True. result = check_direction(**arg,", "[0, 0]}, # No movement. {\"prev_center\": [0, 0], \"current_center\": [10,", "[30, 0]} result = is_intersect(**args) assert result == True def", "true case.\"\"\" directions = { \"right\": {\"prev_center\": [0, 0], \"current_center\":", "0], \"current_center\": [0, 10]}, # Bottom. ] for arg in", "[0, 0], \"current_center\": [0, 10]}, # Bottom. ] for arg", "[10, 0], \"B\": [10, 30], \"C\": [0, 10], \"D\": [30,", "== expect def test_false(self): \"\"\"Test false case.\"\"\" directions = {", "== expect def test_direction_none(self): \"\"\"Check if always return true when", "true case.\"\"\" args = {\"A\": [10, 0], \"B\": [10, 30],", "[0, 0], \"current_center\": [0, 0]}, # No movement. {\"prev_center\": [0,", "\"current_center\": [10, 0], \"expect\": False}, # This is bottom. \"top\":", "\"current_center\": [0, 10]}, # Bottom. ] for arg in args:", "def test_false(self): \"\"\"Test false case.\"\"\" directions = { \"right\": {\"prev_center\":", "[10, 0], \"current_center\": [0, 0], \"expect\": True}, \"top\": {\"prev_center\": [0,", "= check_direction(**args, direction=direction_config[direction_str]) assert result == expect def test_false(self): \"\"\"Test" ]
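The src.utils module itself is not included here; the sketch below shows one plausible implementation consistent with how the tests call direction_config, check_direction, and is_intersect (a dot-product direction check and an orientation-based segment-intersection test). Everything beyond the call signatures is an assumption.

# Hypothetical reference implementation matching the test call signatures.
direction_config = {
    "right": (1, 0),
    "left": (-1, 0),
    "top": (0, -1),    # image coordinates: y grows downward
    "bottom": (0, 1),
}


def check_direction(prev_center, current_center, direction=None):
    """Return True if the movement vector points along `direction` (or direction is None)."""
    if direction is None:
        return True
    dx = current_center[0] - prev_center[0]
    dy = current_center[1] - prev_center[1]
    # A positive dot product means the movement agrees with the requested direction.
    return dx * direction[0] + dy * direction[1] > 0


def _orientation(p, q, r):
    """Sign of the cross product (q - p) x (r - p)."""
    return (q[0] - p[0]) * (r[1] - p[1]) - (q[1] - p[1]) * (r[0] - p[0])


def is_intersect(A, B, C, D):
    """Return True if segment AB properly intersects segment CD."""
    return (_orientation(A, B, C) * _orientation(A, B, D) < 0
            and _orientation(C, D, A) * _orientation(C, D, B) < 0)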
[ "3) ps2 = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps1, ps2) self.assertEqual(len(ParameterSet.all()),", "def test_is_finished(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.is_finished(), True)", "ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.is_finished(), True) runs = ps.create_runs_upto(1) self.assertFalse(ps.is_finished())", "0, 3, 111, 222) self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0)) def test_all(self):", "ps2]) self.assertEqual(len(ParameterSet.all()), 2) def test_find(self): ps = ParameterSet.find_or_create(0, 1, 2,", "runs], [0, 1, 2]) self.assertEqual([r.seed for r in runs], [0,", "5)) self.assertEqual(ps.id, 500) self.assertEqual(ps.params, (2, 3, 4, 5)) self.assertEqual(ps.run_ids, [])", "3, 111, 222) self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 1) def test_average_results(self): ps =", "r) in enumerate(runs): r.store_result([1.0 + i, 2.0 + i, 3.0", "<reponame>crest-cassia/caravan_search_engine import unittest from caravan.tables import Tables from caravan.parameter_set import", "self.assertEqual([r.id for r in runs], [3, 4, 5]) self.assertEqual([r.seed for", "ParameterSet.find_or_create(0, 1, 3, 4) runs = ps2.create_runs_upto(3) self.assertEqual([r.id for r", "1, 2, 3) self.assertEqual(ParameterSet.all(), [ps]) ps2 = ParameterSet.find_or_create(0, 1, 2,", "2, 3) self.assertEqual(ps1, ps2) self.assertEqual(len(ParameterSet.all()), 1) def test_create_runs(self): ps =", "+ i, 2.0 + i, 3.0 + 1], 0, 3,", "caravan.tables import Tables from caravan.parameter_set import ParameterSet class ParameterSetTest(unittest.TestCase): def", "= Tables.get() self.t.clear() def test_ps(self): ps = ParameterSet(500, (2, 3,", "4, 5]) self.assertEqual([r.seed for r in runs], [0, 1, 2])", "r in runs], [3, 4, 5]) self.assertEqual([r.seed for r in", "1, 2, 3) ps2 = ParameterSet.find_or_create(0, 1, 2, 4) pid", "[ps]) ps2 = ParameterSet.find_or_create(0, 1, 2, 4) self.assertEqual(ParameterSet.all(), [ps, ps2])", "pid = ps2.id self.assertEqual(pid, 1) self.assertEqual(ParameterSet.find(1), ps2) if __name__ ==", "ParameterSet.find_or_create(0, 1, 2, 3) runs = ps.create_runs_upto(3) self.assertEqual([r.id for r", "ps2.create_runs_upto(3) self.assertEqual([r.id for r in runs], [3, 4, 5]) self.assertEqual([r.seed", "ps = ParameterSet.find_or_create(0, 1, 2, 3) runs = ps.create_runs_upto(3) self.assertEqual(ps.average_results(),", "3, 4, 5)) self.assertEqual(ps.id, 500) self.assertEqual(ps.params, (2, 3, 4, 5))", "2]) ps2 = ParameterSet.find_or_create(0, 1, 3, 4) runs = ps2.create_runs_upto(3)", "6) self.assertEqual(len(ParameterSet.all()), 2) def test_find_or_create(self): ps1 = ParameterSet.find_or_create(0, 1, 2,", "2, 3) self.assertEqual(ParameterSet.all(), [ps]) ps2 = ParameterSet.find_or_create(0, 1, 2, 4)", "import ParameterSet class ParameterSetTest(unittest.TestCase): def setUp(self): self.t = Tables.get() self.t.clear()", "2]) def test_is_finished(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.is_finished(),", "1) def test_average_results(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) runs", "1, 2, 4) pid = ps2.id self.assertEqual(pid, 1) self.assertEqual(ParameterSet.find(1), ps2)", "[3, 4, 5]) self.assertEqual([r.seed for r in runs], [0, 1,", "ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ParameterSet.all(), [ps]) ps2 =", "= ps.create_runs_upto(3) self.assertEqual([r.id for r in runs], [0, 1, 2])", "2) def test_find_or_create(self): ps1 = 
ParameterSet.find_or_create(0, 1, 2, 3) ps2", "3) self.assertEqual(ParameterSet.all(), [ps]) ps2 = ParameterSet.find_or_create(0, 1, 2, 4) self.assertEqual(ParameterSet.all(),", "self.assertEqual(ps.params, (0, 1, 2, 3)) self.assertEqual(len(ParameterSet.all()), 1) ps2 = ParameterSet.find_or_create(3,", "r in runs], [0, 1, 2]) def test_is_finished(self): ps =", "3.0, 4.0)) def test_all(self): ps = ParameterSet.find_or_create(0, 1, 2, 3)", "self.assertEqual([r.id for r in runs], [0, 1, 2]) self.assertEqual([r.seed for", "1, 2]) self.assertEqual([r.seed for r in runs], [0, 1, 2])", "for r in runs], [3, 4, 5]) self.assertEqual([r.seed for r", "+ 1], 0, 3, 111, 222) self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0))", "1) def test_create_runs(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) runs", "[0, 1, 2]) def test_is_finished(self): ps = ParameterSet.find_or_create(0, 1, 2,", "5)) self.assertEqual(ps.run_ids, []) def test_find_or_create(self): ps = ParameterSet.find_or_create(0, 1, 2,", "ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.id, 0) self.assertEqual(ps.params, (0,", "self.assertEqual(ps.is_finished(), True) runs = ps.create_runs_upto(1) self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 0) runs[0].store_result([1.0, 2.0,", "ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.is_finished(), True) runs =", "setUp(self): self.t = Tables.get() self.t.clear() def test_ps(self): ps = ParameterSet(500,", "= ParameterSet.find_or_create(0, 1, 2, 3) runs = ps.create_runs_upto(3) self.assertEqual([r.id for", "self.assertEqual(len(ps.finished_runs()), 1) def test_average_results(self): ps = ParameterSet.find_or_create(0, 1, 2, 3)", "r.store_result([1.0 + i, 2.0 + i, 3.0 + 1], 0,", "ps1 = ParameterSet.find_or_create(0, 1, 2, 3) ps2 = ParameterSet.find_or_create(0, 1,", "2.0, 3.0], 0, 3, 111, 222) self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 1) def", "= ps.create_runs_upto(3) self.assertEqual(ps.average_results(), ()) for (i, r) in enumerate(runs): r.store_result([1.0", "Tables.get() self.t.clear() def test_ps(self): ps = ParameterSet(500, (2, 3, 4,", "self.assertEqual(ps.id, 500) self.assertEqual(ps.params, (2, 3, 4, 5)) self.assertEqual(ps.run_ids, []) def", "self.assertEqual(ps.id, 0) self.assertEqual(ps.params, (0, 1, 2, 3)) self.assertEqual(len(ParameterSet.all()), 1) ps2", "4, 5)) self.assertEqual(ps.run_ids, []) def test_find_or_create(self): ps = ParameterSet.find_or_create(0, 1,", "= ParameterSet.find_or_create(3, 4, 5, 6) self.assertEqual(len(ParameterSet.all()), 2) def test_find_or_create(self): ps1", "from caravan.tables import Tables from caravan.parameter_set import ParameterSet class ParameterSetTest(unittest.TestCase):", "[0, 1, 2]) ps2 = ParameterSet.find_or_create(0, 1, 3, 4) runs", "in runs], [3, 4, 5]) self.assertEqual([r.seed for r in runs],", "ps2) self.assertEqual(len(ParameterSet.all()), 1) def test_create_runs(self): ps = ParameterSet.find_or_create(0, 1, 2,", "r in runs], [0, 1, 2]) self.assertEqual([r.seed for r in", "(2.0, 3.0, 4.0)) def test_all(self): ps = ParameterSet.find_or_create(0, 1, 2,", "from caravan.parameter_set import ParameterSet class ParameterSetTest(unittest.TestCase): def setUp(self): self.t =", "[ps, ps2]) self.assertEqual(len(ParameterSet.all()), 2) def test_find(self): ps = ParameterSet.find_or_create(0, 1,", "def test_all(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ParameterSet.all(), [ps])", "500) self.assertEqual(ps.params, 
(2, 3, 4, 5)) self.assertEqual(ps.run_ids, []) def test_find_or_create(self):", "test_is_finished(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.is_finished(), True) runs", "self.assertEqual(ps.params, (2, 3, 4, 5)) self.assertEqual(ps.run_ids, []) def test_find_or_create(self): ps", "2, 3) ps2 = ParameterSet.find_or_create(0, 1, 2, 4) pid =", "4, 5, 6) self.assertEqual(len(ParameterSet.all()), 2) def test_find_or_create(self): ps1 = ParameterSet.find_or_create(0,", "self.assertEqual(len(ParameterSet.all()), 1) ps2 = ParameterSet.find_or_create(3, 4, 5, 6) self.assertEqual(len(ParameterSet.all()), 2)", "enumerate(runs): r.store_result([1.0 + i, 2.0 + i, 3.0 + 1],", "ParameterSet.find_or_create(0, 1, 2, 3) ps2 = ParameterSet.find_or_create(0, 1, 2, 4)", "in runs], [0, 1, 2]) ps2 = ParameterSet.find_or_create(0, 1, 3,", "ParameterSet class ParameterSetTest(unittest.TestCase): def setUp(self): self.t = Tables.get() self.t.clear() def", "2]) self.assertEqual([r.seed for r in runs], [0, 1, 2]) ps2", "self.assertEqual(ps.run_ids, []) def test_find_or_create(self): ps = ParameterSet.find_or_create(0, 1, 2, 3)", "(0, 1, 2, 3)) self.assertEqual(len(ParameterSet.all()), 1) ps2 = ParameterSet.find_or_create(3, 4,", "3) self.assertEqual(ps.is_finished(), True) runs = ps.create_runs_upto(1) self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 0) runs[0].store_result([1.0,", "test_create_runs(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) runs = ps.create_runs_upto(3)", "ParameterSet.find_or_create(0, 1, 2, 3) ps2 = ParameterSet.find_or_create(0, 1, 2, 3)", "self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 1) def test_average_results(self): ps = ParameterSet.find_or_create(0, 1, 2,", "runs], [0, 1, 2]) def test_is_finished(self): ps = ParameterSet.find_or_create(0, 1,", "runs[0].store_result([1.0, 2.0, 3.0], 0, 3, 111, 222) self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 1)", "self.assertEqual(len(ps.finished_runs()), 0) runs[0].store_result([1.0, 2.0, 3.0], 0, 3, 111, 222) self.assertTrue(ps.is_finished())", "runs], [0, 1, 2]) ps2 = ParameterSet.find_or_create(0, 1, 3, 4)", "3, 4, 5)) self.assertEqual(ps.run_ids, []) def test_find_or_create(self): ps = ParameterSet.find_or_create(0,", "5, 6) self.assertEqual(len(ParameterSet.all()), 2) def test_find_or_create(self): ps1 = ParameterSet.find_or_create(0, 1,", "ps2 = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps1, ps2) self.assertEqual(len(ParameterSet.all()), 1)", "3) runs = ps.create_runs_upto(3) self.assertEqual([r.id for r in runs], [0,", "ps = ParameterSet(500, (2, 3, 4, 5)) self.assertEqual(ps.id, 500) self.assertEqual(ps.params,", "ParameterSet.find_or_create(0, 1, 2, 4) pid = ps2.id self.assertEqual(pid, 1) self.assertEqual(ParameterSet.find(1),", "4) runs = ps2.create_runs_upto(3) self.assertEqual([r.id for r in runs], [3,", "test_ps(self): ps = ParameterSet(500, (2, 3, 4, 5)) self.assertEqual(ps.id, 500)", "self.assertEqual(len(ParameterSet.all()), 2) def test_find(self): ps = ParameterSet.find_or_create(0, 1, 2, 3)", "self.t.clear() def test_ps(self): ps = ParameterSet(500, (2, 3, 4, 5))", "self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0)) def test_all(self): ps = ParameterSet.find_or_create(0, 1,", "self.assertEqual([r.seed for r in runs], [0, 1, 2]) ps2 =", "class ParameterSetTest(unittest.TestCase): def setUp(self): self.t = Tables.get() self.t.clear() def test_ps(self):", "import unittest from caravan.tables 
import Tables from caravan.parameter_set import ParameterSet", "self.assertEqual([r.seed for r in runs], [0, 1, 2]) def test_is_finished(self):", "i, 3.0 + 1], 0, 3, 111, 222) self.assertEqual(ps.average_results(), (2.0,", "True) runs = ps.create_runs_upto(1) self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 0) runs[0].store_result([1.0, 2.0, 3.0],", "def test_find(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) ps2 =", "= ParameterSet.find_or_create(0, 1, 2, 3) ps2 = ParameterSet.find_or_create(0, 1, 2,", "ps.create_runs_upto(3) self.assertEqual([r.id for r in runs], [0, 1, 2]) self.assertEqual([r.seed", "2) def test_find(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) ps2", "runs], [3, 4, 5]) self.assertEqual([r.seed for r in runs], [0,", "for (i, r) in enumerate(runs): r.store_result([1.0 + i, 2.0 +", "[]) def test_find_or_create(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.id,", "ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.id, 0) self.assertEqual(ps.params, (0, 1, 2,", "test_all(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ParameterSet.all(), [ps]) ps2", "1, 2, 3) self.assertEqual(ps1, ps2) self.assertEqual(len(ParameterSet.all()), 1) def test_create_runs(self): ps", "ParameterSet(500, (2, 3, 4, 5)) self.assertEqual(ps.id, 500) self.assertEqual(ps.params, (2, 3,", "222) self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 1) def test_average_results(self): ps = ParameterSet.find_or_create(0, 1,", "1, 2, 4) self.assertEqual(ParameterSet.all(), [ps, ps2]) self.assertEqual(len(ParameterSet.all()), 2) def test_find(self):", "runs = ps.create_runs_upto(3) self.assertEqual(ps.average_results(), ()) for (i, r) in enumerate(runs):", "2, 4) self.assertEqual(ParameterSet.all(), [ps, ps2]) self.assertEqual(len(ParameterSet.all()), 2) def test_find(self): ps", "4, 5)) self.assertEqual(ps.id, 500) self.assertEqual(ps.params, (2, 3, 4, 5)) self.assertEqual(ps.run_ids,", "= ps2.create_runs_upto(3) self.assertEqual([r.id for r in runs], [3, 4, 5])", "ps2.id self.assertEqual(pid, 1) self.assertEqual(ParameterSet.find(1), ps2) if __name__ == '__main__': unittest.main()", "def test_ps(self): ps = ParameterSet(500, (2, 3, 4, 5)) self.assertEqual(ps.id,", "test_average_results(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) runs = ps.create_runs_upto(3)", "ps.create_runs_upto(3) self.assertEqual(ps.average_results(), ()) for (i, r) in enumerate(runs): r.store_result([1.0 +", "2, 3) runs = ps.create_runs_upto(3) self.assertEqual(ps.average_results(), ()) for (i, r)", "3)) self.assertEqual(len(ParameterSet.all()), 1) ps2 = ParameterSet.find_or_create(3, 4, 5, 6) self.assertEqual(len(ParameterSet.all()),", "3, 4) runs = ps2.create_runs_upto(3) self.assertEqual([r.id for r in runs],", "1, 2, 3) self.assertEqual(ps.is_finished(), True) runs = ps.create_runs_upto(1) self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()),", "3, 111, 222) self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0)) def test_all(self): ps", "1) ps2 = ParameterSet.find_or_create(3, 4, 5, 6) self.assertEqual(len(ParameterSet.all()), 2) def", "in runs], [0, 1, 2]) self.assertEqual([r.seed for r in runs],", "= ParameterSet.find_or_create(0, 1, 2, 3) runs = ps.create_runs_upto(3) self.assertEqual(ps.average_results(), ())", "3.0 + 1], 0, 3, 111, 222) self.assertEqual(ps.average_results(), (2.0, 3.0,", "def test_average_results(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) runs =", "()) 
for (i, r) in enumerate(runs): r.store_result([1.0 + i, 2.0", "5]) self.assertEqual([r.seed for r in runs], [0, 1, 2]) def", "111, 222) self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0)) def test_all(self): ps =", "for r in runs], [0, 1, 2]) ps2 = ParameterSet.find_or_create(0,", "111, 222) self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 1) def test_average_results(self): ps = ParameterSet.find_or_create(0,", "1, 2]) ps2 = ParameterSet.find_or_create(0, 1, 3, 4) runs =", "2, 3) self.assertEqual(ps.is_finished(), True) runs = ps.create_runs_upto(1) self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 0)", "3) self.assertEqual(ps.id, 0) self.assertEqual(ps.params, (0, 1, 2, 3)) self.assertEqual(len(ParameterSet.all()), 1)", "ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ParameterSet.all(), [ps]) ps2 = ParameterSet.find_or_create(0, 1,", "self.assertEqual(ParameterSet.all(), [ps]) ps2 = ParameterSet.find_or_create(0, 1, 2, 4) self.assertEqual(ParameterSet.all(), [ps,", "0) self.assertEqual(ps.params, (0, 1, 2, 3)) self.assertEqual(len(ParameterSet.all()), 1) ps2 =", "ps = ParameterSet.find_or_create(0, 1, 2, 3) runs = ps.create_runs_upto(3) self.assertEqual([r.id", "1], 0, 3, 111, 222) self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0)) def", "runs = ps.create_runs_upto(3) self.assertEqual([r.id for r in runs], [0, 1,", "= ParameterSet.find_or_create(0, 1, 2, 4) self.assertEqual(ParameterSet.all(), [ps, ps2]) self.assertEqual(len(ParameterSet.all()), 2)", "3) ps2 = ParameterSet.find_or_create(0, 1, 2, 4) pid = ps2.id", "self.assertEqual(len(ParameterSet.all()), 1) def test_create_runs(self): ps = ParameterSet.find_or_create(0, 1, 2, 3)", "0) runs[0].store_result([1.0, 2.0, 3.0], 0, 3, 111, 222) self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()),", "r in runs], [0, 1, 2]) ps2 = ParameterSet.find_or_create(0, 1,", "[0, 1, 2]) self.assertEqual([r.seed for r in runs], [0, 1,", "def test_create_runs(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) runs =", "runs = ps.create_runs_upto(1) self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 0) runs[0].store_result([1.0, 2.0, 3.0], 0,", "= ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ParameterSet.all(), [ps]) ps2 = ParameterSet.find_or_create(0,", "+ i, 3.0 + 1], 0, 3, 111, 222) self.assertEqual(ps.average_results(),", "test_find(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) ps2 = ParameterSet.find_or_create(0,", "test_find_or_create(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.id, 0) self.assertEqual(ps.params,", "ps2 = ParameterSet.find_or_create(0, 1, 2, 4) self.assertEqual(ParameterSet.all(), [ps, ps2]) self.assertEqual(len(ParameterSet.all()),", "222) self.assertEqual(ps.average_results(), (2.0, 3.0, 4.0)) def test_all(self): ps = ParameterSet.find_or_create(0,", "self.assertEqual(len(ParameterSet.all()), 2) def test_find_or_create(self): ps1 = ParameterSet.find_or_create(0, 1, 2, 3)", "= ParameterSet.find_or_create(0, 1, 3, 4) runs = ps2.create_runs_upto(3) self.assertEqual([r.id for", "2.0 + i, 3.0 + 1], 0, 3, 111, 222)", "def test_find_or_create(self): ps1 = ParameterSet.find_or_create(0, 1, 2, 3) ps2 =", "in runs], [0, 1, 2]) def test_is_finished(self): ps = ParameterSet.find_or_create(0,", "3) runs = ps.create_runs_upto(3) self.assertEqual(ps.average_results(), ()) for (i, r) in", "runs = ps2.create_runs_upto(3) self.assertEqual([r.id for r in runs], [3, 4,", 
"ParameterSetTest(unittest.TestCase): def setUp(self): self.t = Tables.get() self.t.clear() def test_ps(self): ps", "4) pid = ps2.id self.assertEqual(pid, 1) self.assertEqual(ParameterSet.find(1), ps2) if __name__", "4.0)) def test_all(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ParameterSet.all(),", "= ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.id, 0) self.assertEqual(ps.params, (0, 1,", "= ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps1, ps2) self.assertEqual(len(ParameterSet.all()), 1) def", "= ParameterSet.find_or_create(0, 1, 2, 4) pid = ps2.id self.assertEqual(pid, 1)", "= ParameterSet(500, (2, 3, 4, 5)) self.assertEqual(ps.id, 500) self.assertEqual(ps.params, (2,", "for r in runs], [0, 1, 2]) def test_is_finished(self): ps", "2, 3) self.assertEqual(ps.id, 0) self.assertEqual(ps.params, (0, 1, 2, 3)) self.assertEqual(len(ParameterSet.all()),", "1, 2]) def test_is_finished(self): ps = ParameterSet.find_or_create(0, 1, 2, 3)", "ParameterSet.find_or_create(0, 1, 2, 4) self.assertEqual(ParameterSet.all(), [ps, ps2]) self.assertEqual(len(ParameterSet.all()), 2) def", "3.0], 0, 3, 111, 222) self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 1) def test_average_results(self):", "(i, r) in enumerate(runs): r.store_result([1.0 + i, 2.0 + i,", "2, 3) ps2 = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps1, ps2)", "in enumerate(runs): r.store_result([1.0 + i, 2.0 + i, 3.0 +", "ps = ParameterSet.find_or_create(0, 1, 2, 3) ps2 = ParameterSet.find_or_create(0, 1,", "ps2 = ParameterSet.find_or_create(3, 4, 5, 6) self.assertEqual(len(ParameterSet.all()), 2) def test_find_or_create(self):", "1, 2, 3)) self.assertEqual(len(ParameterSet.all()), 1) ps2 = ParameterSet.find_or_create(3, 4, 5,", "self.assertEqual(ps1, ps2) self.assertEqual(len(ParameterSet.all()), 1) def test_create_runs(self): ps = ParameterSet.find_or_create(0, 1,", "Tables from caravan.parameter_set import ParameterSet class ParameterSetTest(unittest.TestCase): def setUp(self): self.t", "ps.create_runs_upto(1) self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 0) runs[0].store_result([1.0, 2.0, 3.0], 0, 3, 111,", "= ps2.id self.assertEqual(pid, 1) self.assertEqual(ParameterSet.find(1), ps2) if __name__ == '__main__':", "import Tables from caravan.parameter_set import ParameterSet class ParameterSetTest(unittest.TestCase): def setUp(self):", "ParameterSet.find_or_create(0, 1, 2, 3) runs = ps.create_runs_upto(3) self.assertEqual(ps.average_results(), ()) for", "2, 3) runs = ps.create_runs_upto(3) self.assertEqual([r.id for r in runs],", "ParameterSet.find_or_create(3, 4, 5, 6) self.assertEqual(len(ParameterSet.all()), 2) def test_find_or_create(self): ps1 =", "def test_find_or_create(self): ps = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.id, 0)", "test_find_or_create(self): ps1 = ParameterSet.find_or_create(0, 1, 2, 3) ps2 = ParameterSet.find_or_create(0,", "self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 0) runs[0].store_result([1.0, 2.0, 3.0], 0, 3, 111, 222)", "self.assertEqual(ps.average_results(), ()) for (i, r) in enumerate(runs): r.store_result([1.0 + i,", "caravan.parameter_set import ParameterSet class ParameterSetTest(unittest.TestCase): def setUp(self): self.t = Tables.get()", "i, 2.0 + i, 3.0 + 1], 0, 3, 111,", "= ps.create_runs_upto(1) self.assertFalse(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 0) runs[0].store_result([1.0, 2.0, 3.0], 0, 3,", 
"self.assertEqual(ParameterSet.all(), [ps, ps2]) self.assertEqual(len(ParameterSet.all()), 2) def test_find(self): ps = ParameterSet.find_or_create(0,", "ps2 = ParameterSet.find_or_create(0, 1, 3, 4) runs = ps2.create_runs_upto(3) self.assertEqual([r.id", "def setUp(self): self.t = Tables.get() self.t.clear() def test_ps(self): ps =", "3) self.assertEqual(ps1, ps2) self.assertEqual(len(ParameterSet.all()), 1) def test_create_runs(self): ps = ParameterSet.find_or_create(0,", "2, 3)) self.assertEqual(len(ParameterSet.all()), 1) ps2 = ParameterSet.find_or_create(3, 4, 5, 6)", "4) self.assertEqual(ParameterSet.all(), [ps, ps2]) self.assertEqual(len(ParameterSet.all()), 2) def test_find(self): ps =", "unittest from caravan.tables import Tables from caravan.parameter_set import ParameterSet class", "1, 2, 3) runs = ps.create_runs_upto(3) self.assertEqual([r.id for r in", "ps2 = ParameterSet.find_or_create(0, 1, 2, 4) pid = ps2.id self.assertEqual(pid,", "(2, 3, 4, 5)) self.assertEqual(ps.run_ids, []) def test_find_or_create(self): ps =", "for r in runs], [0, 1, 2]) self.assertEqual([r.seed for r", "(2, 3, 4, 5)) self.assertEqual(ps.id, 500) self.assertEqual(ps.params, (2, 3, 4,", "ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps1, ps2) self.assertEqual(len(ParameterSet.all()), 1) def test_create_runs(self):", "1, 3, 4) runs = ps2.create_runs_upto(3) self.assertEqual([r.id for r in", "1, 2, 3) self.assertEqual(ps.id, 0) self.assertEqual(ps.params, (0, 1, 2, 3))", "1, 2, 3) ps2 = ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps1,", "0, 3, 111, 222) self.assertTrue(ps.is_finished()) self.assertEqual(len(ps.finished_runs()), 1) def test_average_results(self): ps", "self.t = Tables.get() self.t.clear() def test_ps(self): ps = ParameterSet(500, (2,", "1, 2, 3) runs = ps.create_runs_upto(3) self.assertEqual(ps.average_results(), ()) for (i,", "2, 4) pid = ps2.id self.assertEqual(pid, 1) self.assertEqual(ParameterSet.find(1), ps2) if", "= ParameterSet.find_or_create(0, 1, 2, 3) self.assertEqual(ps.is_finished(), True) runs = ps.create_runs_upto(1)" ]
[ "pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False, True], indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\", [True], indirect=True) def", "OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False, True], indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\", [True], indirect=True) def test_correct_enabled(", "OverhaveS3ManagerSettings class TestS3ManagerSettings: \"\"\" Unit tests for :class:`OverhaveS3ManagerSettings`. \"\"\" @pytest.mark.parametrize(\"test_s3_enabled\",", "bool) -> None: with pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False, True], indirect=True)", "[True], indirect=True) def test_correct_enabled( self, test_s3_enabled: bool, test_s3_autocreate_buckets: bool, test_s3_manager_settings:", "settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert not settings.enabled assert not settings.url assert", "assert test_s3_manager_settings.access_key assert test_s3_manager_settings.secret_key assert test_s3_manager_settings.verify assert test_s3_manager_settings.autocreate_buckets == test_s3_autocreate_buckets", "@pytest.mark.parametrize(\"test_s3_enabled\", [True]) def test_empty_enabled(self, test_s3_enabled: bool) -> None: with pytest.raises(ValidationError):", "tests for :class:`OverhaveS3ManagerSettings`. \"\"\" @pytest.mark.parametrize(\"test_s3_enabled\", [False]) def test_disabled(self, test_s3_enabled: bool)", "test_s3_manager_settings: OverhaveS3ManagerSettings, ) -> None: assert test_s3_manager_settings.enabled == test_s3_enabled assert", "import pytest from pydantic import ValidationError from overhave.transport import OverhaveS3ManagerSettings", "from overhave.transport import OverhaveS3ManagerSettings class TestS3ManagerSettings: \"\"\" Unit tests for", "test_disabled(self, test_s3_enabled: bool) -> None: settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert not", "for :class:`OverhaveS3ManagerSettings`. \"\"\" @pytest.mark.parametrize(\"test_s3_enabled\", [False]) def test_disabled(self, test_s3_enabled: bool) ->", "from pydantic import ValidationError from overhave.transport import OverhaveS3ManagerSettings class TestS3ManagerSettings:", "with pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False, True], indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\", [True], indirect=True)", "-> None: settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert not settings.enabled assert not", "settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\", [True]) def test_empty_enabled(self, test_s3_enabled: bool) -> None: with", "assert test_s3_manager_settings.url assert test_s3_manager_settings.access_key assert test_s3_manager_settings.secret_key assert test_s3_manager_settings.verify assert test_s3_manager_settings.autocreate_buckets", "assert not settings.enabled assert not settings.url assert not settings.access_key assert", ":class:`OverhaveS3ManagerSettings`. 
\"\"\" @pytest.mark.parametrize(\"test_s3_enabled\", [False]) def test_disabled(self, test_s3_enabled: bool) -> None:", ") -> None: assert test_s3_manager_settings.enabled == test_s3_enabled assert test_s3_manager_settings.url assert", "@pytest.mark.parametrize(\"test_s3_enabled\", [True], indirect=True) def test_correct_enabled( self, test_s3_enabled: bool, test_s3_autocreate_buckets: bool,", "def test_disabled(self, test_s3_enabled: bool) -> None: settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert", "test_empty_enabled(self, test_s3_enabled: bool) -> None: with pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False,", "test_s3_autocreate_buckets: bool, test_s3_manager_settings: OverhaveS3ManagerSettings, ) -> None: assert test_s3_manager_settings.enabled ==", "None: assert test_s3_manager_settings.enabled == test_s3_enabled assert test_s3_manager_settings.url assert test_s3_manager_settings.access_key assert", "[False, True], indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\", [True], indirect=True) def test_correct_enabled( self, test_s3_enabled:", "import ValidationError from overhave.transport import OverhaveS3ManagerSettings class TestS3ManagerSettings: \"\"\" Unit", "== test_s3_enabled assert test_s3_manager_settings.url assert test_s3_manager_settings.access_key assert test_s3_manager_settings.secret_key assert test_s3_manager_settings.verify", "\"\"\" @pytest.mark.parametrize(\"test_s3_enabled\", [False]) def test_disabled(self, test_s3_enabled: bool) -> None: settings", "def test_correct_enabled( self, test_s3_enabled: bool, test_s3_autocreate_buckets: bool, test_s3_manager_settings: OverhaveS3ManagerSettings, )", "not settings.access_key assert not settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\", [True]) def test_empty_enabled(self, test_s3_enabled:", "-> None: with pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False, True], indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\",", "test_s3_manager_settings.enabled == test_s3_enabled assert test_s3_manager_settings.url assert test_s3_manager_settings.access_key assert test_s3_manager_settings.secret_key assert", "indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\", [True], indirect=True) def test_correct_enabled( self, test_s3_enabled: bool, test_s3_autocreate_buckets:", "-> None: assert test_s3_manager_settings.enabled == test_s3_enabled assert test_s3_manager_settings.url assert test_s3_manager_settings.access_key", "self, test_s3_enabled: bool, test_s3_autocreate_buckets: bool, test_s3_manager_settings: OverhaveS3ManagerSettings, ) -> None:", "@pytest.mark.parametrize(\"test_s3_enabled\", [False]) def test_disabled(self, test_s3_enabled: bool) -> None: settings =", "[True]) def test_empty_enabled(self, test_s3_enabled: bool) -> None: with pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled)", "test_s3_manager_settings.url assert test_s3_manager_settings.access_key assert test_s3_manager_settings.secret_key assert test_s3_manager_settings.verify assert test_s3_manager_settings.autocreate_buckets ==", "import OverhaveS3ManagerSettings class TestS3ManagerSettings: \"\"\" Unit tests for :class:`OverhaveS3ManagerSettings`. 
\"\"\"", "OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert not settings.enabled assert not settings.url assert not settings.access_key", "settings.access_key assert not settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\", [True]) def test_empty_enabled(self, test_s3_enabled: bool)", "@pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False, True], indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\", [True], indirect=True) def test_correct_enabled( self,", "indirect=True) def test_correct_enabled( self, test_s3_enabled: bool, test_s3_autocreate_buckets: bool, test_s3_manager_settings: OverhaveS3ManagerSettings,", "test_correct_enabled( self, test_s3_enabled: bool, test_s3_autocreate_buckets: bool, test_s3_manager_settings: OverhaveS3ManagerSettings, ) ->", "bool, test_s3_manager_settings: OverhaveS3ManagerSettings, ) -> None: assert test_s3_manager_settings.enabled == test_s3_enabled", "class TestS3ManagerSettings: \"\"\" Unit tests for :class:`OverhaveS3ManagerSettings`. \"\"\" @pytest.mark.parametrize(\"test_s3_enabled\", [False])", "[False]) def test_disabled(self, test_s3_enabled: bool) -> None: settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled)", "not settings.url assert not settings.access_key assert not settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\", [True])", "bool) -> None: settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert not settings.enabled assert", "pydantic import ValidationError from overhave.transport import OverhaveS3ManagerSettings class TestS3ManagerSettings: \"\"\"", "bool, test_s3_autocreate_buckets: bool, test_s3_manager_settings: OverhaveS3ManagerSettings, ) -> None: assert test_s3_manager_settings.enabled", "test_s3_enabled assert test_s3_manager_settings.url assert test_s3_manager_settings.access_key assert test_s3_manager_settings.secret_key assert test_s3_manager_settings.verify assert", "assert not settings.access_key assert not settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\", [True]) def test_empty_enabled(self,", "test_s3_enabled: bool) -> None: settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert not settings.enabled", "assert not settings.url assert not settings.access_key assert not settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\",", "pytest from pydantic import ValidationError from overhave.transport import OverhaveS3ManagerSettings class", "\"\"\" Unit tests for :class:`OverhaveS3ManagerSettings`. 
\"\"\" @pytest.mark.parametrize(\"test_s3_enabled\", [False]) def test_disabled(self,", "None: with pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False, True], indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\", [True],", "not settings.enabled assert not settings.url assert not settings.access_key assert not", "None: settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert not settings.enabled assert not settings.url", "def test_empty_enabled(self, test_s3_enabled: bool) -> None: with pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\",", "overhave.transport import OverhaveS3ManagerSettings class TestS3ManagerSettings: \"\"\" Unit tests for :class:`OverhaveS3ManagerSettings`.", "settings.url assert not settings.access_key assert not settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\", [True]) def", "assert not settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\", [True]) def test_empty_enabled(self, test_s3_enabled: bool) ->", "OverhaveS3ManagerSettings, ) -> None: assert test_s3_manager_settings.enabled == test_s3_enabled assert test_s3_manager_settings.url", "test_s3_enabled: bool) -> None: with pytest.raises(ValidationError): OverhaveS3ManagerSettings(enabled=test_s3_enabled) @pytest.mark.parametrize(\"test_s3_autocreate_buckets\", [False, True],", "True], indirect=True) @pytest.mark.parametrize(\"test_s3_enabled\", [True], indirect=True) def test_correct_enabled( self, test_s3_enabled: bool,", "ValidationError from overhave.transport import OverhaveS3ManagerSettings class TestS3ManagerSettings: \"\"\" Unit tests", "not settings.secret_key @pytest.mark.parametrize(\"test_s3_enabled\", [True]) def test_empty_enabled(self, test_s3_enabled: bool) -> None:", "assert test_s3_manager_settings.enabled == test_s3_enabled assert test_s3_manager_settings.url assert test_s3_manager_settings.access_key assert test_s3_manager_settings.secret_key", "test_s3_enabled: bool, test_s3_autocreate_buckets: bool, test_s3_manager_settings: OverhaveS3ManagerSettings, ) -> None: assert", "= OverhaveS3ManagerSettings(enabled=test_s3_enabled) assert not settings.enabled assert not settings.url assert not", "Unit tests for :class:`OverhaveS3ManagerSettings`. \"\"\" @pytest.mark.parametrize(\"test_s3_enabled\", [False]) def test_disabled(self, test_s3_enabled:", "TestS3ManagerSettings: \"\"\" Unit tests for :class:`OverhaveS3ManagerSettings`. \"\"\" @pytest.mark.parametrize(\"test_s3_enabled\", [False]) def", "settings.enabled assert not settings.url assert not settings.access_key assert not settings.secret_key" ]
[ "\"\"\" return line def postcmd(self, stop, line): \"\"\"Hook method executed", "The `completedefault' method may be overridden to intercept completions for", "= '(Cmd) ' IDENTCHARS = string.ascii_letters + string.digits + '_'", "different completion depending upon which position the argument is in.", "prompt is generated and issued. \"\"\" return line def postcmd(self,", "intro if self.intro: self.stdout.write(str(self.intro)+\"\\n\") stop = None while not stop:", "'shell ' + line[1:] else: return None, None, line i,", "empty line repeats the last command. (Actually, it calls the", "finished.\"\"\" return stop def preloop(self): \"\"\"Hook method executed once when", "cmd, arg, line = self.parseline(line) if not line: return self.emptyline()", "cmd) except AttributeError: compfunc = self.completedefault else: compfunc = self.completenames", "it had been typed in response to the prompt. This", "list. \"\"\" return [] def completenames(self, text, *ignored): dotext =", "for commands that have no complete_ method. The data member", "line, begidx, endidx) try: return self.completion_matches[state] except IndexError: return None", "overridden via an optional argument to the cmdloop() method. The", "= PROMPT identchars = IDENTCHARS ruler = '=' lastcmd =", "self.intro = intro if self.intro: self.stdout.write(str(self.intro)+\"\\n\") stop = None while", "it returns an empty list. \"\"\" return [] def completenames(self,", "for col in range(ncols): i = row + nrows*col if", "is done by calling complete_foo() with arguments text, line, begidx,", "predefined `help' method. Given an argument `topic', it calls the", "func = getattr(self, 'do_' + cmd) except AttributeError: return self.default(line)", "line = input(self.prompt) except EOFError: line = 'EOF' else: self.editline.prompt", "'help ' + line[1:] elif line[0] == '!': if hasattr(self,", "Otherwise try to call complete_<command> to get list of completions.", "hasattr(self, 'do_shell'): line = 'shell ' + line[1:] else: return", "count from 1 upwards for nrows in range(1, len(list)): ncols", "execution hooks. The return value is a flag indicating whether", "completing commands will be done automatically, and completing of commands", "not a string for i in %s\" % \", \".join(map(str,", "a superclass of an interpreter class you define yourself in", "the text being matched, which could be used to provide", "'do_': if name == prevname: continue prevname = name cmd=name[3:]", "import editline self.editline = editline.editline(\"CMD\", self.stdin, self.stdout, sys.stderr) self.editline.rl_completer =", "= '' intro = None doc_leader = \"\" doc_header =", "pass def postloop(self): \"\"\"Hook method executed once when the cmdloop()", "line, begidx, endidx. text is string we are matching against,", "if intro is not None: self.intro = intro if self.intro:", "texts.append(x) while texts and not texts[-1]: del texts[-1] for col", "no arguments, it lists all topics with defined help_ functions,", "stripped endidx = self.editline.get_endidx() - stripped if begidx>0: cmd, args,", "respectively. \"\"\" import string, sys __all__ = [\"Cmd\"] PROMPT =", "self.default(line) else: try: func = getattr(self, 'do_' + cmd) except", "class ElCmd: \"\"\"A simple framework for writing line-oriented command interpreters.", "help with \"help cmd\".' if arg: # XXX check arg", "the input prompt is generated and issued. \"\"\" return line", "= getattr(self, 'do_' + cmd) except AttributeError: return self.default(line) return", "command interpreters. 
Interpreters constructed with this class obey the following", "command completion is done automatically. The optional arguments stdin and", "= 0, len(line) while i < n and line[i] in", "available. By default, it returns an empty list. \"\"\" return", "commands by the interpreter should stop. \"\"\" cmd, arg, line", "line is drawn. It defaults to \"=\". If the value", "4. Typing an empty line repeats the last command. (Actually,", "2. A command is parsed out of each line by", "self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop() finally:", "\"Undocumented commands:\" nohelp = \"*** No help on %s\" use_rawinput", "cmds_doc.append(cmd) del help[cmd] elif getattr(self, name).__doc__: cmds_doc.append(cmd) else: cmds_undoc.append(cmd) self.stdout.write(\"%s\\n\"%str(self.doc_leader))", "`emptyline', which may be overridden in a subclass.) 5. There", "argument consisting of the remainder of the line. 4. Typing", "topics, and undocumented commands. 6. The command '?' is a", "be; see the precmd() and postcmd() methods for useful execution", "This method used to pull in base class attributes #", "command list. Otherwise try to call complete_<command> to get list", "into up to three topics; documented commands, miscellaneous help topics,", "to import editline\") pass def cmdloop(self, intro=None): \"\"\"Repeatedly issue a", "= self.completedefault else: try: compfunc = getattr(self, 'complete_' + cmd)", "getattr(self, 'help_' + arg) except AttributeError: try: doc=getattr(self, 'do_' +", "each line by collecting the prefix composed of characters in", "cmds, cmdlen, maxcol): if cmds: self.stdout.write(\"%s\\n\"%str(header)) if self.ruler: self.stdout.write(\"%s\\n\"%str(self.ruler *", "and not texts[-1]: del texts[-1] for col in range(len(texts)): texts[col]", "methods. \"\"\" prompt = PROMPT identchars = IDENTCHARS ruler =", "line): \"\"\"Hook method executed just before the command line is", "file on input is processed as the command 'EOF'. 2.", "cmd in help: cmds_doc.append(cmd) del help[cmd] elif getattr(self, name).__doc__: cmds_doc.append(cmd)", "Interpreters constructed with this class obey the following conventions: 1.", "command 'EOF'. 2. A command is parsed out of each", "'text'. If a command has not been entered, then complete", "totwidth > displaywidth: break if totwidth <= displaywidth: break else:", "function's listings of documented functions, miscellaneous topics, and undocumented functions", "passed a single argument consisting of the remainder of the", "in response to the prompt. This may be overridden, but", "else: compfunc = self.completenames self.completion_matches = compfunc(text, line, begidx, endidx)", "row + nrows*col if i >= size: break x =", "is enabled, completing commands will be done automatically, and completing", "are separated by two spaces (one was not legible enough).", "# This method used to pull in base class attributes", "input line when no command-specific complete_*() method is available. 
By", "data members `self.doc_header', `self.misc_header', and `self.undoc_header' set the headers used", "is not None: self.intro = intro if self.intro: self.stdout.write(str(self.intro)+\"\\n\") stop", "base class attributes # at a time dir() didn't do", "stdin else: self.stdin = sys.stdin if stdout is not None:", "is not None: self.stdout = stdout else: self.stdout = sys.stdout", "'help_' + arg) except AttributeError: try: doc=getattr(self, 'do_' + arg).__doc__", "begidx and endidx are the beginning and end indexes of", "useful as a superclass of an interpreter class you define", "of a completion key; it defaults to the Tab key.", "cmds_undoc, 15,80) def print_topics(self, header, cmds, cmdlen, maxcol): if cmds:", "superclass of an interpreter class you define yourself in order", "documented commands, miscellaneous help topics, and undocumented commands. 6. The", "= set(a[5:] for a in self.get_names() if a.startswith('help_' + args[0]))", "self.editline.get_begidx() - stripped endidx = self.editline.get_endidx() - stripped if begidx>0:", "\"\"\"Display a list of strings as a compact set of", "= intro if self.intro: self.stdout.write(str(self.intro)+\"\\n\") stop = None while not", "Each column is only as wide as necessary. Columns are", "do_bob(self, s): print(\"bob!\") def do_mods(self, s): print(sys.modules.keys()) if __name__ ==", "commands, miscellaneous help topics, and undocumented commands. 6. The command", "is entered in response to the prompt. If this method", "nonstrings: raise TypeError(\"list[i] not a string for i in %s\"", "didn't do it yet. return dir(self.__class__) def complete_help(self, *args): commands", "to a method 'do_foo()'; the do_ method is passed a", "nohelp = \"*** No help on %s\" use_rawinput = False", "stop def preloop(self): \"\"\"Hook method executed once when the cmdloop()", "line i, n = 0, len(line) while i < n", "be wrapped in a more sophisticated interface. A Cmd instance", "is dispatched to a method 'do_foo()'; the do_ method is", "interpreter should stop. \"\"\" cmd, arg, line = self.parseline(line) if", "in range(ncols): i = row + nrows*col if i >=", "self.postcmd(stop, line) self.postloop() finally: pass def precmd(self, line): \"\"\"Hook method", "if nonstrings: raise TypeError(\"list[i] not a string for i in", "in range(ncols): colwidth = 0 for row in range(nrows): i", "s): print(\"bleep!\") def do_blob(self, s): print(\"blob!\") def do_bob(self, s): print(\"bob!\")", "used to draw separator lines in the help messages. If", "line couldn't be parsed. \"\"\" line = line.strip() if not", "issue a prompt, accept input, parse an initial prefix off", "`default' method may be overridden to intercept commands for which", "self.stdout.write(\"%s\\n\"%str(\" \".join(texts))) class MyCmd(ElCmd,object): def do_bleep(self, s): print(\"bleep!\") def do_blob(self,", "except AttributeError: try: doc=getattr(self, 'do_' + arg).__doc__ if doc: self.stdout.write(\"%s\\n\"%str(doc))", "not None: self.stdout = stdout else: self.stdout = sys.stdout self.cmdqueue", "The optional arguments stdin and stdout specify alternate input and", "texts[col] = texts[col].ljust(colwidths[col]) self.stdout.write(\"%s\\n\"%str(\" \".join(texts))) class MyCmd(ElCmd,object): def do_bleep(self, s):", "self.onecmd(line) stop = self.postcmd(stop, line) self.postloop() finally: pass def precmd(self,", "a list of strings as a compact set of columns.", "is in. 
The `default' method may be overridden to intercept", "\"\"\"Hook method executed once when the cmdloop() method is about", "nonempty command entered. \"\"\" if self.lastcmd: return self.onecmd(self.lastcmd) def default(self,", "self.completekey = completekey if not self.use_rawinput and self.completekey: try: import", "key; it defaults to the Tab key. If completekey is", "== '': return self.default(line) else: try: func = getattr(self, 'do_'", "The command '!' is a synonym for `shell', if a", "name[:5] == 'help_': help[name[5:]]=1 names.sort() # There can be duplicates", "list[i] colwidth = max(colwidth, len(x)) colwidths.append(colwidth) totwidth += colwidth +", "commands = set(self.completenames(*args)) topics = set(a[5:] for a in self.get_names()", "printed out on interpreter startup. This value may be overridden", "line = 'help ' + line[1:] elif line[0] == '!':", "a in self.get_names() if a.startswith('help_' + args[0])) return list(commands |", "if not self.use_rawinput and self.completekey: try: import editline self.editline =", "Tab key. If completekey is not None and the readline", "def print_topics(self, header, cmds, cmdlen, maxcol): if cmds: self.stdout.write(\"%s\\n\"%str(header)) if", "range(ncols): colwidth = 0 for row in range(nrows): i =", "self.completion_matches[state] except IndexError: return None def get_names(self): # This method", "listings of documented functions, miscellaneous topics, and undocumented functions respectively.", "in self.identchars: i = i+1 cmd, arg = line[:i], line[i:].strip()", "list: self.stdout.write(\"<empty>\\n\") return nonstrings = [i for i in range(len(list))", "a compact set of columns. Each column is only as", "an optional argument to the cmdloop() method. The data members", "(lstripped), begidx and endidx are the beginning and end indexes", "as argument. \"\"\" self.preloop() try: if intro is not None:", "maxcol-1) self.stdout.write(\"\\n\") def columnize(self, list, displaywidth=80): \"\"\"Display a list of", "method is not overridden, it repeats the last nonempty command", "an argument `topic', it calls the command `help_topic'. With no", "indicating whether interpretation of commands by the interpreter should stop.", "line[0] == '!': if hasattr(self, 'do_shell'): line = 'shell '", "name[:3] == 'do_': if name == prevname: continue prevname =", "postcmd(self, stop, line): \"\"\"Hook method executed just after a command", "cmdloop() method is called.\"\"\" pass def postloop(self): \"\"\"Hook method executed", "= \"\" doc_header = \"Documented commands (type help <topic>):\" misc_header", "15,80) self.print_topics(self.misc_header, list(help.keys()),15,80) self.print_topics(self.undoc_header, cmds_undoc, 15,80) def print_topics(self, header, cmds,", "overridden to intercept completions for commands that have no complete_", "nrows = len(list) ncols = 1 colwidths = [0] for", "executed once when the cmdloop() method is about to return.", "= 'EOF' else: self.editline.prompt = self.prompt line = self.editline.readline() if", "'help_': help[name[5:]]=1 names.sort() # There can be duplicates if routines", "not isinstance(list[i], str)] if nonstrings: raise TypeError(\"list[i] not a string", "self.print_topics(self.misc_header, list(help.keys()),15,80) self.print_topics(self.undoc_header, cmds_undoc, 15,80) def print_topics(self, header, cmds, cmdlen,", "undocumented commands. 6. The command '?' 
is a synonym for", "continue prevname = name cmd=name[3:] if cmd in help: cmds_doc.append(cmd)", "if totwidth <= displaywidth: break else: nrows = len(list) ncols", "topics:\" undoc_header = \"Undocumented commands:\" nohelp = \"*** No help", "> displaywidth: break if totwidth <= displaywidth: break else: nrows", "\"\"\"Hook method executed just before the command line is interpreted,", "i in %s\" % \", \".join(map(str, nonstrings))) size = len(list)", "later be wrapped in a more sophisticated interface. A Cmd", "i < n and line[i] in self.identchars: i = i+1", "precmd() and postcmd() methods for useful execution hooks. The return", "just before the command line is interpreted, but after the", "begidx, endidx) try: return self.completion_matches[state] except IndexError: return None def", "+ cmd) except AttributeError: compfunc = self.completedefault else: compfunc =", "The data member `self.ruler' sets the character used to draw", "when an empty line is entered in response to the", "the line couldn't be parsed. \"\"\" line = line.strip() if", "return. \"\"\" pass def parseline(self, line): \"\"\"Parse the line into", "if cmd is None: return self.default(line) self.lastcmd = line if", "for a in self.get_names() if a.startswith(dotext)] def complete(self, text, state):", "ncols = (size+nrows-1) // nrows colwidths = [] totwidth =", "= self.complete except ImportError: print(\"Failed to import editline\") pass def", "and completing of commands args is done by calling complete_foo()", "names: if name[:5] == 'help_': help[name[5:]]=1 names.sort() # There can", "in base class attributes # at a time dir() didn't", "return self.default(line) self.lastcmd = line if line == 'EOF' :", "matching against, all returned matches must begin with it. line", "else: try: func = getattr(self, 'do_' + cmd) except AttributeError:", "not list: self.stdout.write(\"<empty>\\n\") return nonstrings = [i for i in", "pass def precmd(self, line): \"\"\"Hook method executed just before the", "executed once when the cmdloop() method is called.\"\"\" pass def", "' + line[1:] else: return None, None, line i, n", "readline name of a completion key; it defaults to the", "is no do_ method. The `completedefault' method may be overridden", "AttributeError: return self.default(line) return func(arg) def emptyline(self): \"\"\"Called when an", "'EOF' : print(\"\") print(\"Bye\") sys.exit(0) if cmd == '': return", "in self.get_names() if a.startswith('help_' + args[0])) return list(commands | topics)", "objects; if not specified, sys.stdin and sys.stdout are used. \"\"\"", "if a.startswith(dotext)] def complete(self, text, state): \"\"\"Return the next possible", "the identchars member. 3. A command `foo' is dispatched to", "which there is no do_ method. The `completedefault' method may", "There is a predefined `help' method. Given an argument `topic',", "it yet. 
return dir(self.__class__) def complete_help(self, *args): commands = set(self.completenames(*args))", "def preloop(self): \"\"\"Hook method executed once when the cmdloop() method", "endidx = self.editline.get_endidx() - stripped if begidx>0: cmd, args, foo", "help function's listings of documented functions, miscellaneous topics, and undocumented", "defined help_ functions, broken into up to three topics; documented", "% (arg,))) return func() else: names = self.get_names() cmds_doc =", "+ arg).__doc__ if doc: self.stdout.write(\"%s\\n\"%str(doc)) return except AttributeError: pass self.stdout.write(\"%s\\n\"%str(self.nohelp", "self.default(line) self.lastcmd = line if line == 'EOF' : print(\"\")", "colwidth = 0 for row in range(nrows): i = row", "spaces (one was not legible enough). \"\"\" if not list:", "framework. There is no good reason to instantiate Cmd itself;", "line[1:] elif line[0] == '!': if hasattr(self, 'do_shell'): line =", "often useful for test harnesses, administrative tools, and prototypes that", "name).__doc__: cmds_doc.append(cmd) else: cmds_undoc.append(cmd) self.stdout.write(\"%s\\n\"%str(self.doc_leader)) self.print_topics(self.doc_header, cmds_doc, 15,80) self.print_topics(self.misc_header, list(help.keys()),15,80)", "argument. \"\"\" self.preloop() try: if intro is not None: self.intro", "\"Miscellaneous help topics:\" undoc_header = \"Undocumented commands:\" nohelp = \"***", "is not None: self.stdin = stdin else: self.stdin = sys.stdin", "simple framework for writing line-oriented command interpreters. These are often", "= '=' lastcmd = '' intro = None doc_leader =", "input is processed as the command 'EOF'. 2. A command", "\"\"\" if not list: self.stdout.write(\"<empty>\\n\") return nonstrings = [i for", "if line == 'EOF' : print(\"\") print(\"Bye\") sys.exit(0) if cmd", "with defined help_ functions, broken into up to three topics;", "There can be duplicates if routines overridden prevname = ''", "prevname = '' for name in names: if name[:3] ==", "method. Given an argument `topic', it calls the command `help_topic'.", "- len(line) begidx = self.editline.get_begidx() - stripped endidx = self.editline.get_endidx()", "if cmd == '': compfunc = self.completedefault else: try: compfunc", "call complete_<command> to get list of completions. \"\"\" if state", "= name cmd=name[3:] if cmd in help: cmds_doc.append(cmd) del help[cmd]", "the line as argument. \"\"\" self.preloop() try: if intro is", "if stdin is not None: self.stdin = stdin else: self.stdin", "\"\" doc_header = \"Documented commands (type help <topic>):\" misc_header =", "not specified, sys.stdin and sys.stdout are used. \"\"\" if stdin", "to intercept commands for which there is no do_ method.", "if name == prevname: continue prevname = name cmd=name[3:] if", "= line[:i], line[i:].strip() return cmd, arg, line def onecmd(self, line):", "to \"=\". If the value of `self.intro' is nonempty when", "used to pull in base class attributes # at a", "no ruler line is drawn. It defaults to \"=\". If", "emptyline(self): \"\"\"Called when an empty line is entered in response", "`foo' is dispatched to a method 'do_foo()'; the do_ method", "command has not been entered, then complete against command list.", "executed just after a command dispatch is finished.\"\"\" return stop", "command '!' is a synonym for `shell', if a do_shell", "ruler line is drawn. It defaults to \"=\". If the", "complete_foo() with arguments text, line, begidx, endidx. 
text is string", "= [i for i in range(len(list)) if not isinstance(list[i], str)]", "\"\"\" if self.lastcmd: return self.onecmd(self.lastcmd) def default(self, line): \"\"\"Called on", "method 'do_foo()'; the do_ method is passed a single argument", "with \"help\" or detailed help with \"help cmd\".' if arg:", "\"\"\" line = line.strip() if not line: return None, None,", "None and the readline module is available, command completion is", "method is called.\"\"\" pass def postloop(self): \"\"\"Hook method executed once", "command `help_topic'. With no arguments, it lists all topics with", "a command has not been entered, then complete against command", "arg) except AttributeError: try: doc=getattr(self, 'do_' + arg).__doc__ if doc:", "readline module is available, command completion is done automatically. The", "A Cmd instance or subclass instance is a line-oriented interpreter", "then complete against command list. Otherwise try to call complete_<command>", "If empty, no ruler line is drawn. It defaults to", "5. There is a predefined `help' method. Given an argument", "= \"Undocumented commands:\" nohelp = \"*** No help on %s\"", "commands:\" nohelp = \"*** No help on %s\" use_rawinput =", "endidx) try: return self.completion_matches[state] except IndexError: return None def get_names(self):", "dispatched to a method 'do_foo()'; the do_ method is passed", "cmd, args, foo = self.parseline(line) if cmd == '': compfunc", "= origline.lstrip() stripped = len(origline) - len(line) begidx = self.editline.get_begidx()", "self.stdout.write(\"\\n\") def columnize(self, list, displaywidth=80): \"\"\"Display a list of strings", "and `self.undoc_header' set the headers used for the help function's", "{} for name in names: if name[:5] == 'help_': help[name[5:]]=1", "at a time dir() didn't do it yet. return dir(self.__class__)", "is done automatically. The optional arguments stdin and stdout specify", "line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line)", "method executed just before the command line is interpreted, but", "# at a time dir() didn't do it yet. return", "after a command dispatch is finished.\"\"\" return stop def preloop(self):", "line.rstrip('\\r\\n') line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop,", "optional argument 'completekey' is the readline name of a completion", "func(arg) def emptyline(self): \"\"\"Called when an empty line is entered", "about to return. \"\"\" pass def parseline(self, line): \"\"\"Parse the", "line[1:] else: return None, None, line i, n = 0,", "not line: return None, None, line elif line[0] == '?':", "+ nrows*col if i >= size: x = \"\" else:", "False def __init__(self, completekey='tab', stdin=None, stdout=None): \"\"\"Instantiate a line-oriented interpreter", "6. The command '?' is a synonym for `help'. The", "calls the method `emptyline', which may be overridden in a", "break x = list[i] colwidth = max(colwidth, len(x)) colwidths.append(colwidth) totwidth", "no do_ method. The `completedefault' method may be overridden to", "def postloop(self): \"\"\"Hook method executed once when the cmdloop() method", "'?' is a synonym for `help'. The command '!' is", "with it. line is the current input line (lstripped), begidx", "arg syntax try: func = getattr(self, 'help_' + arg) except", "yet. return dir(self.__class__) def complete_help(self, *args): commands = set(self.completenames(*args)) topics", "args, foo = self.parseline(line) if cmd == '': compfunc =", "complete_<command> to get list of completions. 
\"\"\" if state ==", "pass def cmdloop(self, intro=None): \"\"\"Repeatedly issue a prompt, accept input,", "command entered. \"\"\" if self.lastcmd: return self.onecmd(self.lastcmd) def default(self, line):", "characters in the identchars member. 3. A command `foo' is", "method is passed a single argument consisting of the remainder", "self.editline.prompt = self.prompt line = self.editline.readline() if not len(line): line", "line): \"\"\"Hook method executed just after a command dispatch is", "If completion is enabled, completing commands will be done automatically,", "None, None, line i, n = 0, len(line) while i", "depending upon which position the argument is in. The `default'", "%s\" % \", \".join(map(str, nonstrings))) size = len(list) if size", "the command prefix is not recognized. If this method is", "IDENTCHARS = string.ascii_letters + string.digits + '_' class ElCmd: \"\"\"A", "to complete an input line when no command-specific complete_*() method", "use_rawinput = False def __init__(self, completekey='tab', stdin=None, stdout=None): \"\"\"Instantiate a", "\"\"\" import string, sys __all__ = [\"Cmd\"] PROMPT = '(Cmd)", "print(\"blob!\") def do_bob(self, s): print(\"bob!\") def do_mods(self, s): print(sys.modules.keys()) if", "the Tab key. If completekey is not None and the", "Columns are separated by two spaces (one was not legible", "state): \"\"\"Return the next possible completion for 'text'. If a", "i = i+1 cmd, arg = line[:i], line[i:].strip() return cmd,", "for writing line-oriented command interpreters. These are often useful for", "0 for row in range(nrows): i = row + nrows*col", "methods for useful execution hooks. The return value is a", "method. The data member `self.ruler' sets the character used to", "help_ functions, broken into up to three topics; documented commands,", "identchars = IDENTCHARS ruler = '=' lastcmd = '' intro", "self.print_topics(self.undoc_header, cmds_undoc, 15,80) def print_topics(self, header, cmds, cmdlen, maxcol): if", "is not None and the readline module is available, command", "cmds: self.stdout.write(\"%s\\n\"%str(header)) if self.ruler: self.stdout.write(\"%s\\n\"%str(self.ruler * len(header))) self.columnize(cmds, maxcol-1) self.stdout.write(\"\\n\")", "the method `emptyline', which may be overridden in a subclass.)", "following conventions: 1. End of file on input is processed", "initial prefix off the received input, and dispatch to action", "framework. The optional argument 'completekey' is the readline name of", "15,80) def print_topics(self, header, cmds, cmdlen, maxcol): if cmds: self.stdout.write(\"%s\\n\"%str(header))", "a string containing the arguments. Returns a tuple containing (command,", "miscellaneous topics, and undocumented functions respectively. \"\"\" import string, sys", "totwidth <= displaywidth: break else: nrows = len(list) ncols =", "line elif line[0] == '?': line = 'help ' +", "interpreter class you define yourself in order to inherit Cmd's", "argument `topic', it calls the command `help_topic'. With no arguments,", "and stdout specify alternate input and output file objects; if", "Typing an empty line repeats the last command. (Actually, it", "line = line.strip() if not line: return None, None, line", "compfunc = self.completedefault else: compfunc = self.completenames self.completion_matches = compfunc(text,", "\"\"\"Repeatedly issue a prompt, accept input, parse an initial prefix", "tuple containing (command, args, line). 'command' and 'args' may be", "`help_topic'. 
With no arguments, it lists all topics with defined", "yourself in order to inherit Cmd's methods and encapsulate action", "is the readline name of a completion key; it defaults", "it's useful as a superclass of an interpreter class you", "is only as wide as necessary. Columns are separated by", "a line-oriented interpreter framework. There is no good reason to", "+ nrows*col if i >= size: break x = list[i]", "0, len(line) while i < n and line[i] in self.identchars:", "and a string containing the arguments. Returns a tuple containing", "completekey is not None and the readline module is available,", "return except AttributeError: pass self.stdout.write(\"%s\\n\"%str(self.nohelp % (arg,))) return func() else:", "\", \".join(map(str, nonstrings))) size = len(list) if size == 1:", "possible completion for 'text'. If a command has not been", "= (size+nrows-1) // nrows colwidths = [] totwidth = -2", "(size+nrows-1) // nrows colwidths = [] totwidth = -2 for", "no complete_ method. The data member `self.ruler' sets the character", "except IndexError: return None def get_names(self): # This method used", "a.startswith('help_' + args[0])) return list(commands | topics) def do_help(self, arg):", "self.editline.get_line_buffer() line = origline.lstrip() stripped = len(origline) - len(line) begidx", "the received input, and dispatch to action methods, passing them", "received input, and dispatch to action methods, passing them the", "IDENTCHARS ruler = '=' lastcmd = '' intro = None", "\"\"\"Interpret the argument as though it had been typed in", "define yourself in order to inherit Cmd's methods and encapsulate", "[] totwidth = -2 for col in range(ncols): colwidth =", "text, *ignored): dotext = 'do_'+text return [a[3:] for a in", "to pull in base class attributes # at a time", "- stripped if begidx>0: cmd, args, foo = self.parseline(line) if", "return self.default(line) else: try: func = getattr(self, 'do_' + cmd)", "arguments, it lists all topics with defined help_ functions, broken", "module is available, command completion is done automatically. The optional", "break else: nrows = len(list) ncols = 1 colwidths =", "as the command 'EOF'. 2. A command is parsed out", "str)] if nonstrings: raise TypeError(\"list[i] not a string for i", "line when no command-specific complete_*() method is available. By default,", "inherit Cmd's methods and encapsulate action methods. \"\"\" prompt =", "is printed out on interpreter startup. This value may be", "== '': compfunc = self.completedefault else: try: compfunc = getattr(self,", "line) self.postloop() finally: pass def precmd(self, line): \"\"\"Hook method executed", "1 upwards for nrows in range(1, len(list)): ncols = (size+nrows-1)", "print(\"\") print(\"Bye\") sys.exit(0) if cmd == '': return self.default(line) else:", "= len(origline) - len(line) begidx = self.editline.get_begidx() - stripped endidx", "do_shell method exists. 7. If completion is enabled, completing commands", "into a command name and a string containing the arguments.", "None: return self.default(line) self.lastcmd = line if line == 'EOF'", "in a more sophisticated interface. A Cmd instance or subclass", "done automatically. The optional arguments stdin and stdout specify alternate", "not self.use_rawinput and self.completekey: try: import editline self.editline = editline.editline(\"CMD\",", "return self.default(line) return func(arg) def emptyline(self): \"\"\"Called when an empty", "as a compact set of columns. 
Each column is only", "list(commands | topics) def do_help(self, arg): 'List available commands with", "writing line-oriented command interpreters. These are often useful for test", "prompt. If this method is not overridden, it repeats the", "(Actually, it calls the method `emptyline', which may be overridden", "an empty list. \"\"\" return [] def completenames(self, text, *ignored):", "'' for name in names: if name[:3] == 'do_': if", "None doc_leader = \"\" doc_header = \"Documented commands (type help", "and undocumented commands. 6. The command '?' is a synonym", "the current input line (lstripped), begidx and endidx are the", "a do_shell method exists. 7. If completion is enabled, completing", "data member `self.ruler' sets the character used to draw separator", "it repeats the last nonempty command entered. \"\"\" if self.lastcmd:", "[a[3:] for a in self.get_names() if a.startswith(dotext)] def complete(self, text,", "doc=getattr(self, 'do_' + arg).__doc__ if doc: self.stdout.write(\"%s\\n\"%str(doc)) return except AttributeError:", "cmd == '': return self.default(line) else: try: func = getattr(self,", "If this method is not overridden, it prints an error", "cmdlen, maxcol): if cmds: self.stdout.write(\"%s\\n\"%str(header)) if self.ruler: self.stdout.write(\"%s\\n\"%str(self.ruler * len(header)))", "method executed just after a command dispatch is finished.\"\"\" return", "stdout specify alternate input and output file objects; if not", "return list(commands | topics) def do_help(self, arg): 'List available commands", "for row in range(nrows): i = row + nrows*col if", "range(nrows): i = row + nrows*col if i >= size:", "`self.ruler' sets the character used to draw separator lines in", "topics; documented commands, miscellaneous help topics, and undocumented commands. 6.", "%s (%d)\\n' % (line,len(line))) def completedefault(self, *ignored): \"\"\"Method called to", "in range(nrows): i = row + nrows*col if i >=", "them the remainder of the line as argument. \"\"\" self.preloop()", "doc_leader = \"\" doc_header = \"Documented commands (type help <topic>):\"", "intro is not None: self.intro = intro if self.intro: self.stdout.write(str(self.intro)+\"\\n\")", "have no complete_ method. The data member `self.ruler' sets the", "+ arg) except AttributeError: try: doc=getattr(self, 'do_' + arg).__doc__ if", "totwidth += colwidth + 2 if totwidth > displaywidth: break", "while i < n and line[i] in self.identchars: i =", "defaults to \"=\". If the value of `self.intro' is nonempty", "interpreter startup. This value may be overridden via an optional", "input, parse an initial prefix off the received input, and", "should stop. \"\"\" cmd, arg, line = self.parseline(line) if not", "return nonstrings = [i for i in range(len(list)) if not", "= sys.stdout self.cmdqueue = [] self.completekey = completekey if not", "else: try: compfunc = getattr(self, 'complete_' + cmd) except AttributeError:", "there is no do_ method. The `completedefault' method may be", "x = \"\" else: x = list[i] texts.append(x) while texts", "will be done automatically, and completing of commands args is", "\"\"\" self.preloop() try: if intro is not None: self.intro =", "arguments text, line, begidx, endidx. text is string we are", "Given an argument `topic', it calls the command `help_topic'. With", "= completekey if not self.use_rawinput and self.completekey: try: import editline", "after the input prompt is generated and issued. \"\"\" return", "ruler = '=' lastcmd = '' intro = None doc_leader", "method exists. 7. 
If completion is enabled, completing commands will", "begidx = self.editline.get_begidx() - stripped endidx = self.editline.get_endidx() - stripped", "drawn. It defaults to \"=\". If the value of `self.intro'", "is a synonym for `shell', if a do_shell method exists.", "calls the command `help_topic'. With no arguments, it lists all", "optional argument to the cmdloop() method. The data members `self.doc_header',", "set(self.completenames(*args)) topics = set(a[5:] for a in self.get_names() if a.startswith('help_'", "overridden, but should not normally need to be; see the", "and sys.stdout are used. \"\"\" if stdin is not None:", "try: doc=getattr(self, 'do_' + arg).__doc__ if doc: self.stdout.write(\"%s\\n\"%str(doc)) return except", "arg).__doc__ if doc: self.stdout.write(\"%s\\n\"%str(doc)) return except AttributeError: pass self.stdout.write(\"%s\\n\"%str(self.nohelp %", "completing of commands args is done by calling complete_foo() with", "cmds_doc.append(cmd) else: cmds_undoc.append(cmd) self.stdout.write(\"%s\\n\"%str(self.doc_leader)) self.print_topics(self.doc_header, cmds_doc, 15,80) self.print_topics(self.misc_header, list(help.keys()),15,80) self.print_topics(self.undoc_header,", "def emptyline(self): \"\"\"Called when an empty line is entered in", "line): \"\"\"Interpret the argument as though it had been typed", "the character used to draw separator lines in the help", "' + line[1:] elif line[0] == '!': if hasattr(self, 'do_shell'):", "state == 0: origline = self.editline.get_line_buffer() line = origline.lstrip() stripped", "on %s\" use_rawinput = False def __init__(self, completekey='tab', stdin=None, stdout=None):", "an input line when the command prefix is not recognized.", "column is only as wide as necessary. Columns are separated", "may be overridden to intercept completions for commands that have", "return self.completion_matches[state] except IndexError: return None def get_names(self): # This", "character used to draw separator lines in the help messages.", "not line: return self.emptyline() if cmd is None: return self.default(line)", "import string, sys __all__ = [\"Cmd\"] PROMPT = '(Cmd) '", "return None, None, line i, n = 0, len(line) while", "= 'shell ' + line[1:] else: return None, None, line", "3. A command `foo' is dispatched to a method 'do_foo()';", "attributes # at a time dir() didn't do it yet.", "return stop def preloop(self): \"\"\"Hook method executed once when the", "[] self.completekey = completekey if not self.use_rawinput and self.completekey: try:", "stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop() finally: pass", "'complete_' + cmd) except AttributeError: compfunc = self.completedefault else: compfunc", "completions. \"\"\" if state == 0: origline = self.editline.get_line_buffer() line", "and end indexes of the text being matched, which could", "while not stop: if self.cmdqueue: line = self.cmdqueue.pop(0) else: if", "dir(self.__class__) def complete_help(self, *args): commands = set(self.completenames(*args)) topics = set(a[5:]", "is not recognized. If this method is not overridden, it", "before the command line is interpreted, but after the input", "processed as the command 'EOF'. 2. A command is parsed", "func = getattr(self, 'help_' + arg) except AttributeError: try: doc=getattr(self,", "be overridden via an optional argument to the cmdloop() method.", "a tuple containing (command, args, line). 
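For example (illustrative of the parsing rules above):

    parseline('help foo')  ->  ('help', 'foo', 'help foo')
    parseline('?foo')      ->  ('help', 'foo', 'help foo')   # '?' expands to 'help'
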
'command' and 'args' may", "= 'help ' + line[1:] elif line[0] == '!': if", "help: cmds_doc.append(cmd) del help[cmd] elif getattr(self, name).__doc__: cmds_doc.append(cmd) else: cmds_undoc.append(cmd)", "the cmdloop() method is about to return. \"\"\" pass def", "not None: self.intro = intro if self.intro: self.stdout.write(str(self.intro)+\"\\n\") stop =", "for col in range(ncols): colwidth = 0 for row in", "if routines overridden prevname = '' for name in names:", "The `default' method may be overridden to intercept commands for", "= None while not stop: if self.cmdqueue: line = self.cmdqueue.pop(0)", "self.cmdqueue = [] self.completekey = completekey if not self.use_rawinput and", "XXX check arg syntax try: func = getattr(self, 'help_' +", "help <topic>):\" misc_header = \"Miscellaneous help topics:\" undoc_header = \"Undocumented", "len(header))) self.columnize(cmds, maxcol-1) self.stdout.write(\"\\n\") def columnize(self, list, displaywidth=80): \"\"\"Display a", "intercept commands for which there is no do_ method. The", "No help on %s\" use_rawinput = False def __init__(self, completekey='tab',", "else: self.stdin = sys.stdin if stdout is not None: self.stdout", "self.stdin = stdin else: self.stdin = sys.stdin if stdout is", "except AttributeError: compfunc = self.completedefault else: compfunc = self.completenames self.completion_matches", "def do_mods(self, s): print(sys.modules.keys()) if __name__ == '__main__': mc =", "the help function's listings of documented functions, miscellaneous topics, and", "methods, passing them the remainder of the line as argument.", "hooks. The return value is a flag indicating whether interpretation", "cmd=name[3:] if cmd in help: cmds_doc.append(cmd) del help[cmd] elif getattr(self,", "collecting the prefix composed of characters in the identchars member.", "not None and the readline module is available, command completion", "containing the arguments. Returns a tuple containing (command, args, line).", "it lists all topics with defined help_ functions, broken into", "completion for 'text'. If a command has not been entered,", "= compfunc(text, line, begidx, endidx) try: return self.completion_matches[state] except IndexError:", "syntax try: func = getattr(self, 'help_' + arg) except AttributeError:", "off the received input, and dispatch to action methods, passing", "%s\" use_rawinput = False def __init__(self, completekey='tab', stdin=None, stdout=None): \"\"\"Instantiate", "= input(self.prompt) except EOFError: line = 'EOF' else: self.editline.prompt =", "from 1 upwards for nrows in range(1, len(list)): ncols =", "help[cmd] elif getattr(self, name).__doc__: cmds_doc.append(cmd) else: cmds_undoc.append(cmd) self.stdout.write(\"%s\\n\"%str(self.doc_leader)) self.print_topics(self.doc_header, cmds_doc,", "commands for which there is no do_ method. The `completedefault'", "PROMPT = '(Cmd) ' IDENTCHARS = string.ascii_letters + string.digits +", "if stdout is not None: self.stdout = stdout else: self.stdout", "displaywidth=80): \"\"\"Display a list of strings as a compact set", "by the interpreter should stop. \"\"\" cmd, arg, line =", "cmdloop method is called, it is printed out on interpreter", "command name and a string containing the arguments. Returns a", "= 'do_'+text return [a[3:] for a in self.get_names() if a.startswith(dotext)]", "\"\"\" cmd, arg, line = self.parseline(line) if not line: return", "used to provide different completion depending upon which position the", "when the cmdloop() method is about to return. 
\"\"\" pass", "conventions: 1. End of file on input is processed as", "undoc_header = \"Undocumented commands:\" nohelp = \"*** No help on", "dispatch to action methods, passing them the remainder of the", "= \"Miscellaneous help topics:\" undoc_header = \"Undocumented commands:\" nohelp =", "| topics) def do_help(self, arg): 'List available commands with \"help\"", "row count from 1 upwards for nrows in range(1, len(list)):", "issued. \"\"\" return line def postcmd(self, stop, line): \"\"\"Hook method", "on interpreter startup. This value may be overridden via an", "in response to the prompt. If this method is not", "or subclass instance is a line-oriented interpreter framework. There is", "identchars member. 3. A command `foo' is dispatched to a", "s): print(\"blob!\") def do_bob(self, s): print(\"bob!\") def do_mods(self, s): print(sys.modules.keys())", "self.stdout.write('*** Unknown syntax: %s (%d)\\n' % (line,len(line))) def completedefault(self, *ignored):", "else: x = list[i] texts.append(x) while texts and not texts[-1]:", "completenames(self, text, *ignored): dotext = 'do_'+text return [a[3:] for a", "\"\" else: x = list[i] texts.append(x) while texts and not", "self.stdout.write('%s\\n'%str(list[0])) return # Try every row count from 1 upwards", "is called, it is printed out on interpreter startup. This", "interface. A Cmd instance or subclass instance is a line-oriented", "' IDENTCHARS = string.ascii_letters + string.digits + '_' class ElCmd:", "it. line is the current input line (lstripped), begidx and", "harnesses, administrative tools, and prototypes that will later be wrapped", "string, sys __all__ = [\"Cmd\"] PROMPT = '(Cmd) ' IDENTCHARS", "help[name[5:]]=1 names.sort() # There can be duplicates if routines overridden", "class MyCmd(ElCmd,object): def do_bleep(self, s): print(\"bleep!\") def do_blob(self, s): print(\"blob!\")", "topics, and undocumented functions respectively. \"\"\" import string, sys __all__", "sys.stdin if stdout is not None: self.stdout = stdout else:", "method may be overridden to intercept completions for commands that", "end indexes of the text being matched, which could be", "interpreter framework. The optional argument 'completekey' is the readline name", "= len(list) ncols = 1 colwidths = [0] for row", "input line (lstripped), begidx and endidx are the beginning and", "break if totwidth <= displaywidth: break else: nrows = len(list)", "= self.completenames self.completion_matches = compfunc(text, line, begidx, endidx) try: return", "None: self.stdout = stdout else: self.stdout = sys.stdout self.cmdqueue =", "except EOFError: line = 'EOF' else: self.editline.prompt = self.prompt line", "= line if line == 'EOF' : print(\"\") print(\"Bye\") sys.exit(0)", "= set(self.completenames(*args)) topics = set(a[5:] for a in self.get_names() if", "the cmdloop() method. The data members `self.doc_header', `self.misc_header', and `self.undoc_header'", "// nrows colwidths = [] totwidth = -2 for col", "a completion key; it defaults to the Tab key. 
If", "line = self.cmdqueue.pop(0) else: if self.use_rawinput: try: line = input(self.prompt)", "prevname = name cmd=name[3:] if cmd in help: cmds_doc.append(cmd) del", "automatically, and completing of commands args is done by calling", "if hasattr(self, 'do_shell'): line = 'shell ' + line[1:] else:", "sys.exit(0) if cmd == '': return self.default(line) else: try: func", "\"\"\"Called when an empty line is entered in response to", "is parsed out of each line by collecting the prefix", "completion depending upon which position the argument is in. The", "len(origline) - len(line) begidx = self.editline.get_begidx() - stripped endidx =", "topics = set(a[5:] for a in self.get_names() if a.startswith('help_' +", "empty line is entered in response to the prompt. If", "output file objects; if not specified, sys.stdin and sys.stdout are", "in a subclass.) 5. There is a predefined `help' method.", "the interpreter should stop. \"\"\" cmd, arg, line = self.parseline(line)", "in range(len(texts)): texts[col] = texts[col].ljust(colwidths[col]) self.stdout.write(\"%s\\n\"%str(\" \".join(texts))) class MyCmd(ElCmd,object): def", "itself; rather, it's useful as a superclass of an interpreter", "line is the current input line (lstripped), begidx and endidx", "passing them the remainder of the line as argument. \"\"\"", "entered in response to the prompt. If this method is", "command. (Actually, it calls the method `emptyline', which may be", "precmd(self, line): \"\"\"Hook method executed just before the command line", "optional arguments stdin and stdout specify alternate input and output", "doc: self.stdout.write(\"%s\\n\"%str(doc)) return except AttributeError: pass self.stdout.write(\"%s\\n\"%str(self.nohelp % (arg,))) return", "topics with defined help_ functions, broken into up to three", "self.use_rawinput and self.completekey: try: import editline self.editline = editline.editline(\"CMD\", self.stdin,", "if a.startswith('help_' + args[0])) return list(commands | topics) def do_help(self,", ">= size: x = \"\" else: x = list[i] texts.append(x)", "line). 'command' and 'args' may be None if the line", "members `self.doc_header', `self.misc_header', and `self.undoc_header' set the headers used for", "enough). \"\"\" if not list: self.stdout.write(\"<empty>\\n\") return nonstrings = [i", "completion key; it defaults to the Tab key. If completekey", "method `emptyline', which may be overridden in a subclass.) 5.", "for nrows in range(1, len(list)): ncols = (size+nrows-1) // nrows", "(command, args, line). 'command' and 'args' may be None if", "= 'EOF' else: line = line.rstrip('\\r\\n') line = self.precmd(line) stop", "ElCmd: \"\"\"A simple framework for writing line-oriented command interpreters. 
These", "[] cmds_undoc = [] help = {} for name in", "completion is enabled, completing commands will be done automatically, and", "'command' and 'args' may be None if the line couldn't", "in names: if name[:3] == 'do_': if name == prevname:", "self.default(line) return func(arg) def emptyline(self): \"\"\"Called when an empty line", "value of `self.intro' is nonempty when the cmdloop method is", "try: func = getattr(self, 'help_' + arg) except AttributeError: try:", "getattr(self, name).__doc__: cmds_doc.append(cmd) else: cmds_undoc.append(cmd) self.stdout.write(\"%s\\n\"%str(self.doc_leader)) self.print_topics(self.doc_header, cmds_doc, 15,80) self.print_topics(self.misc_header,", "upwards for nrows in range(1, len(list)): ncols = (size+nrows-1) //", "method is not overridden, it prints an error message and", "nrows*col if i >= size: x = \"\" else: x", "self.intro: self.stdout.write(str(self.intro)+\"\\n\") stop = None while not stop: if self.cmdqueue:", "line is entered in response to the prompt. If this", "of strings as a compact set of columns. Each column", "for a in self.get_names() if a.startswith('help_' + args[0])) return list(commands", "though it had been typed in response to the prompt.", "+ line[1:] else: return None, None, line i, n =", "class to build line-oriented command interpreters. Interpreters constructed with this", "be overridden to intercept completions for commands that have no", "= self.prompt line = self.editline.readline() if not len(line): line =", "the value of `self.intro' is nonempty when the cmdloop method", "`completedefault' method may be overridden to intercept completions for commands", "'do_foo()'; the do_ method is passed a single argument consisting", "[i for i in range(len(list)) if not isinstance(list[i], str)] if", "for useful execution hooks. The return value is a flag", "must begin with it. line is the current input line", "cmds_undoc = [] help = {} for name in names:", "[0] for row in range(nrows): texts = [] for col", "endidx. text is string we are matching against, all returned", "executed just before the command line is interpreted, but after", "this method is not overridden, it prints an error message", "if not len(line): line = 'EOF' else: line = line.rstrip('\\r\\n')", "every row count from 1 upwards for nrows in range(1,", "i = row + nrows*col if i >= size: break", "None def get_names(self): # This method used to pull in", "if arg: # XXX check arg syntax try: func =", "with arguments text, line, begidx, endidx. text is string we", "i+1 cmd, arg = line[:i], line[i:].strip() return cmd, arg, line", "useful execution hooks. The return value is a flag indicating", "is a line-oriented interpreter framework. There is no good reason", "in the help messages. If empty, no ruler line is", "do_bleep(self, s): print(\"bleep!\") def do_blob(self, s): print(\"blob!\") def do_bob(self, s):", "a command name and a string containing the arguments. Returns", "If completekey is not None and the readline module is", "of an interpreter class you define yourself in order to", "to intercept completions for commands that have no complete_ method.", "test harnesses, administrative tools, and prototypes that will later be", "or detailed help with \"help cmd\".' if arg: # XXX", "lines in the help messages. 
If empty, no ruler line", "no good reason to instantiate Cmd itself; rather, it's useful", "\"\"\"Hook method executed once when the cmdloop() method is called.\"\"\"", "string.ascii_letters + string.digits + '_' class ElCmd: \"\"\"A simple framework", "if doc: self.stdout.write(\"%s\\n\"%str(doc)) return except AttributeError: pass self.stdout.write(\"%s\\n\"%str(self.nohelp % (arg,)))", "for `help'. The command '!' is a synonym for `shell',", "\"\"\" pass def parseline(self, line): \"\"\"Parse the line into a", "texts = [] for col in range(ncols): i = row", "of file on input is processed as the command 'EOF'.", "line: return self.emptyline() if cmd is None: return self.default(line) self.lastcmd", "for i in %s\" % \", \".join(map(str, nonstrings))) size =", "list of strings as a compact set of columns. Each", "in self.get_names() if a.startswith(dotext)] def complete(self, text, state): \"\"\"Return the", "messages. If empty, no ruler line is drawn. It defaults", "name == prevname: continue prevname = name cmd=name[3:] if cmd", "'_' class ElCmd: \"\"\"A simple framework for writing line-oriented command", "empty list. \"\"\" return [] def completenames(self, text, *ignored): dotext", "Cmd's methods and encapsulate action methods. \"\"\" prompt = PROMPT", "of completions. \"\"\" if state == 0: origline = self.editline.get_line_buffer()", "self.get_names() if a.startswith(dotext)] def complete(self, text, state): \"\"\"Return the next", "isinstance(list[i], str)] if nonstrings: raise TypeError(\"list[i] not a string for", "names = self.get_names() cmds_doc = [] cmds_undoc = [] help", "a prompt, accept input, parse an initial prefix off the", "command dispatch is finished.\"\"\" return stop def preloop(self): \"\"\"Hook method", "sys __all__ = [\"Cmd\"] PROMPT = '(Cmd) ' IDENTCHARS =", "range(len(texts)): texts[col] = texts[col].ljust(colwidths[col]) self.stdout.write(\"%s\\n\"%str(\" \".join(texts))) class MyCmd(ElCmd,object): def do_bleep(self,", "self.stdout = sys.stdout self.cmdqueue = [] self.completekey = completekey if", "\"\"\" if stdin is not None: self.stdin = stdin else:", "\"=\". If the value of `self.intro' is nonempty when the", "composed of characters in the identchars member. 3. A command", "a command dispatch is finished.\"\"\" return stop def preloop(self): \"\"\"Hook", "return [a[3:] for a in self.get_names() if a.startswith(dotext)] def complete(self,", "max(colwidth, len(x)) colwidths.append(colwidth) totwidth += colwidth + 2 if totwidth", "print(\"Failed to import editline\") pass def cmdloop(self, intro=None): \"\"\"Repeatedly issue", "it prints an error message and returns. \"\"\" self.stdout.write('*** Unknown", "completion is done automatically. The optional arguments stdin and stdout", "print(\"bob!\") def do_mods(self, s): print(sys.modules.keys()) if __name__ == '__main__': mc", "set of columns. Each column is only as wide as", "returned matches must begin with it. line is the current", "entered, then complete against command list. Otherwise try to call", "(%d)\\n' % (line,len(line))) def completedefault(self, *ignored): \"\"\"Method called to complete", "text, line, begidx, endidx. text is string we are matching", "return self.emptyline() if cmd is None: return self.default(line) self.lastcmd =", "is a predefined `help' method. Given an argument `topic', it", "returns an empty list. 
\"\"\" return [] def completenames(self, text,", "compfunc = self.completenames self.completion_matches = compfunc(text, line, begidx, endidx) try:", "line == 'EOF' : print(\"\") print(\"Bye\") sys.exit(0) if cmd ==", "self.get_names() cmds_doc = [] cmds_undoc = [] help = {}", "+ '_' class ElCmd: \"\"\"A simple framework for writing line-oriented", "useful for test harnesses, administrative tools, and prototypes that will", "stdin and stdout specify alternate input and output file objects;", "= stdout else: self.stdout = sys.stdout self.cmdqueue = [] self.completekey", "exists. 7. If completion is enabled, completing commands will be", "line def onecmd(self, line): \"\"\"Interpret the argument as though it", "len(line): line = 'EOF' else: line = line.rstrip('\\r\\n') line =", "of `self.intro' is nonempty when the cmdloop method is called,", "to get list of completions. \"\"\" if state == 0:", "with \"help cmd\".' if arg: # XXX check arg syntax", "self.get_names() if a.startswith('help_' + args[0])) return list(commands | topics) def", "if self.lastcmd: return self.onecmd(self.lastcmd) def default(self, line): \"\"\"Called on an", "to three topics; documented commands, miscellaneous help topics, and undocumented", "names: if name[:3] == 'do_': if name == prevname: continue", "not normally need to be; see the precmd() and postcmd()", "self.stdout.write(\"%s\\n\"%str(self.doc_leader)) self.print_topics(self.doc_header, cmds_doc, 15,80) self.print_topics(self.misc_header, list(help.keys()),15,80) self.print_topics(self.undoc_header, cmds_undoc, 15,80) def", "line into a command name and a string containing the", "len(line) begidx = self.editline.get_begidx() - stripped endidx = self.editline.get_endidx() -", "of each line by collecting the prefix composed of characters", "help messages. If empty, no ruler line is drawn. It", "undocumented functions respectively. \"\"\" import string, sys __all__ = [\"Cmd\"]", "against command list. Otherwise try to call complete_<command> to get", "on input is processed as the command 'EOF'. 2. A", "<reponame>mark-nicholson/python-editline \"\"\"A generic class to build line-oriented command interpreters. Interpreters", "list of completions. \"\"\" if state == 0: origline =", "sets the character used to draw separator lines in the", "but after the input prompt is generated and issued. \"\"\"", "If a command has not been entered, then complete against", "begin with it. line is the current input line (lstripped),", "n and line[i] in self.identchars: i = i+1 cmd, arg", "import editline\") pass def cmdloop(self, intro=None): \"\"\"Repeatedly issue a prompt,", "line.strip() if not line: return None, None, line elif line[0]", "commands args is done by calling complete_foo() with arguments text,", "self.parseline(line) if cmd == '': compfunc = self.completedefault else: try:", "'do_' + cmd) except AttributeError: return self.default(line) return func(arg) def", "commands will be done automatically, and completing of commands args", "`help'. The command '!' is a synonym for `shell', if", "may be overridden, but should not normally need to be;", "commands. 6. The command '?' is a synonym for `help'.", "self.completion_matches = compfunc(text, line, begidx, endidx) try: return self.completion_matches[state] except", "while texts and not texts[-1]: del texts[-1] for col in", "empty, no ruler line is drawn. It defaults to \"=\".", "the arguments. Returns a tuple containing (command, args, line). 
'command'", "list[i] texts.append(x) while texts and not texts[-1]: del texts[-1] for", "= self.postcmd(stop, line) self.postloop() finally: pass def precmd(self, line): \"\"\"Hook", "overridden, it repeats the last nonempty command entered. \"\"\" if", "input prompt is generated and issued. \"\"\" return line def", "functions, broken into up to three topics; documented commands, miscellaneous", "cmd) except AttributeError: return self.default(line) return func(arg) def emptyline(self): \"\"\"Called", "need to be; see the precmd() and postcmd() methods for", "i >= size: break x = list[i] colwidth = max(colwidth,", "default, it returns an empty list. \"\"\" return [] def", "and postcmd() methods for useful execution hooks. The return value", "complete an input line when no command-specific complete_*() method is", "`self.doc_header', `self.misc_header', and `self.undoc_header' set the headers used for the", "a predefined `help' method. Given an argument `topic', it calls", "def do_bob(self, s): print(\"bob!\") def do_mods(self, s): print(sys.modules.keys()) if __name__", "len(list) ncols = 1 colwidths = [0] for row in", "when the cmdloop() method is called.\"\"\" pass def postloop(self): \"\"\"Hook", "'List available commands with \"help\" or detailed help with \"help", "rather, it's useful as a superclass of an interpreter class", "[] def completenames(self, text, *ignored): dotext = 'do_'+text return [a[3:]", "for the help function's listings of documented functions, miscellaneous topics,", "If the value of `self.intro' is nonempty when the cmdloop", "== prevname: continue prevname = name cmd=name[3:] if cmd in", "of characters in the identchars member. 3. A command `foo'", "may be overridden via an optional argument to the cmdloop()", "complete_*() method is available. By default, it returns an empty", "parseline(self, line): \"\"\"Parse the line into a command name and", "= self.editline.get_line_buffer() line = origline.lstrip() stripped = len(origline) - len(line)", "prefix composed of characters in the identchars member. 3. A", "len(list)): ncols = (size+nrows-1) // nrows colwidths = [] totwidth", "line is interpreted, but after the input prompt is generated", "in range(1, len(list)): ncols = (size+nrows-1) // nrows colwidths =", "= self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop()", "typed in response to the prompt. This may be overridden,", "for which there is no do_ method. The `completedefault' method", "next possible completion for 'text'. If a command has not", "method executed once when the cmdloop() method is about to", "method executed once when the cmdloop() method is called.\"\"\" pass", "do_help(self, arg): 'List available commands with \"help\" or detailed help", "i = row + nrows*col if i >= size: x", "method is about to return. \"\"\" pass def parseline(self, line):", "specify alternate input and output file objects; if not specified,", "def completedefault(self, *ignored): \"\"\"Method called to complete an input line", "None, line elif line[0] == '?': line = 'help '", "generic class to build line-oriented command interpreters. Interpreters constructed with", "in names: if name[:5] == 'help_': help[name[5:]]=1 names.sort() # There", "= len(list) if size == 1: self.stdout.write('%s\\n'%str(list[0])) return # Try", "with this class obey the following conventions: 1. End of", "the beginning and end indexes of the text being matched,", "called, it is printed out on interpreter startup. This value", "last nonempty command entered. 
\"\"\" if self.lastcmd: return self.onecmd(self.lastcmd) def", "if self.intro: self.stdout.write(str(self.intro)+\"\\n\") stop = None while not stop: if", ": print(\"\") print(\"Bye\") sys.exit(0) if cmd == '': return self.default(line)", "line): \"\"\"Called on an input line when the command prefix", "position the argument is in. The `default' method may be", "stdout=None): \"\"\"Instantiate a line-oriented interpreter framework. The optional argument 'completekey'", "'do_shell'): line = 'shell ' + line[1:] else: return None,", "and returns. \"\"\" self.stdout.write('*** Unknown syntax: %s (%d)\\n' % (line,len(line)))", "the prompt. If this method is not overridden, it repeats", "\"\"\" self.stdout.write('*** Unknown syntax: %s (%d)\\n' % (line,len(line))) def completedefault(self,", "endidx are the beginning and end indexes of the text", "constructed with this class obey the following conventions: 1. End", "build line-oriented command interpreters. Interpreters constructed with this class obey", "in range(nrows): texts = [] for col in range(ncols): i", "pass def parseline(self, line): \"\"\"Parse the line into a command", "arg, line = self.parseline(line) if not line: return self.emptyline() if", "the prompt. This may be overridden, but should not normally", "help topics:\" undoc_header = \"Undocumented commands:\" nohelp = \"*** No", "command '?' is a synonym for `help'. The command '!'", "self.parseline(line) if not line: return self.emptyline() if cmd is None:", "'do_' + arg).__doc__ if doc: self.stdout.write(\"%s\\n\"%str(doc)) return except AttributeError: pass", "def parseline(self, line): \"\"\"Parse the line into a command name", "be overridden, but should not normally need to be; see", "has not been entered, then complete against command list. Otherwise", "if begidx>0: cmd, args, foo = self.parseline(line) if cmd ==", "'!' is a synonym for `shell', if a do_shell method", "line = self.editline.readline() if not len(line): line = 'EOF' else:", "could be used to provide different completion depending upon which", "except AttributeError: return self.default(line) return func(arg) def emptyline(self): \"\"\"Called when", "couldn't be parsed. \"\"\" line = line.strip() if not line:", "A command is parsed out of each line by collecting", "syntax: %s (%d)\\n' % (line,len(line))) def completedefault(self, *ignored): \"\"\"Method called", "self.use_rawinput: try: line = input(self.prompt) except EOFError: line = 'EOF'", "try: compfunc = getattr(self, 'complete_' + cmd) except AttributeError: compfunc", "= getattr(self, 'complete_' + cmd) except AttributeError: compfunc = self.completedefault", "in order to inherit Cmd's methods and encapsulate action methods.", "+ cmd) except AttributeError: return self.default(line) return func(arg) def emptyline(self):", "= {} for name in names: if name[:5] == 'help_':", "the precmd() and postcmd() methods for useful execution hooks. The", "being matched, which could be used to provide different completion", "self.columnize(cmds, maxcol-1) self.stdout.write(\"\\n\") def columnize(self, list, displaywidth=80): \"\"\"Display a list", "print(\"bleep!\") def do_blob(self, s): print(\"blob!\") def do_bob(self, s): print(\"bob!\") def", "error message and returns. \"\"\" self.stdout.write('*** Unknown syntax: %s (%d)\\n'", "= self.editline.get_endidx() - stripped if begidx>0: cmd, args, foo =", "1 colwidths = [0] for row in range(nrows): texts =", "synonym for `shell', if a do_shell method exists. 7. 
If", "else: self.editline.prompt = self.prompt line = self.editline.readline() if not len(line):", "obey the following conventions: 1. End of file on input", "self.stdout.write(\"%s\\n\"%str(self.ruler * len(header))) self.columnize(cmds, maxcol-1) self.stdout.write(\"\\n\") def columnize(self, list, displaywidth=80):", "to instantiate Cmd itself; rather, it's useful as a superclass", "cmdloop() method is about to return. \"\"\" pass def parseline(self,", "repeats the last command. (Actually, it calls the method `emptyline',", "help on %s\" use_rawinput = False def __init__(self, completekey='tab', stdin=None,", "\"help\" or detailed help with \"help cmd\".' if arg: #", "stdin is not None: self.stdin = stdin else: self.stdin =", "col in range(ncols): i = row + nrows*col if i", "be duplicates if routines overridden prevname = '' for name", "and the readline module is available, command completion is done", "overridden, it prints an error message and returns. \"\"\" self.stdout.write('***", "<= displaywidth: break else: nrows = len(list) ncols = 1", "by collecting the prefix composed of characters in the identchars", "'do_'+text return [a[3:] for a in self.get_names() if a.startswith(dotext)] def", "methods and encapsulate action methods. \"\"\" prompt = PROMPT identchars", "'EOF' else: line = line.rstrip('\\r\\n') line = self.precmd(line) stop =", "With no arguments, it lists all topics with defined help_", "pass self.stdout.write(\"%s\\n\"%str(self.nohelp % (arg,))) return func() else: names = self.get_names()", "None, line i, n = 0, len(line) while i <", "misc_header = \"Miscellaneous help topics:\" undoc_header = \"Undocumented commands:\" nohelp", "= [] for col in range(ncols): i = row +", "it calls the method `emptyline', which may be overridden in", "row in range(nrows): texts = [] for col in range(ncols):", "to build line-oriented command interpreters. Interpreters constructed with this class", "def get_names(self): # This method used to pull in base", "command line is interpreted, but after the input prompt is", "startup. This value may be overridden via an optional argument", "available commands with \"help\" or detailed help with \"help cmd\".'", "\"help cmd\".' if arg: # XXX check arg syntax try:", "and self.completekey: try: import editline self.editline = editline.editline(\"CMD\", self.stdin, self.stdout,", "wrapped in a more sophisticated interface. A Cmd instance or", "if not specified, sys.stdin and sys.stdout are used. \"\"\" if", "automatically. The optional arguments stdin and stdout specify alternate input", "def do_bleep(self, s): print(\"bleep!\") def do_blob(self, s): print(\"blob!\") def do_bob(self,", "may be overridden to intercept commands for which there is", "parsed. \"\"\" line = line.strip() if not line: return None,", "argument is in. The `default' method may be overridden to", "if i >= size: break x = list[i] colwidth =", "necessary. Columns are separated by two spaces (one was not", "to provide different completion depending upon which position the argument", "out on interpreter startup. This value may be overridden via", "not None: self.stdin = stdin else: self.stdin = sys.stdin if", "line (lstripped), begidx and endidx are the beginning and end", "columns. Each column is only as wide as necessary. Columns", "stop. \"\"\" cmd, arg, line = self.parseline(line) if not line:", "be done automatically, and completing of commands args is done", "= list[i] colwidth = max(colwidth, len(x)) colwidths.append(colwidth) totwidth += colwidth", "line. 4. 
Typing an empty line repeats the last command.", "except ImportError: print(\"Failed to import editline\") pass def cmdloop(self, intro=None):", "= editline.editline(\"CMD\", self.stdin, self.stdout, sys.stderr) self.editline.rl_completer = self.complete except ImportError:", "None: self.intro = intro if self.intro: self.stdout.write(str(self.intro)+\"\\n\") stop = None", "prompt. This may be overridden, but should not normally need", "as necessary. Columns are separated by two spaces (one was", "headers used for the help function's listings of documented functions,", "completekey if not self.use_rawinput and self.completekey: try: import editline self.editline", "self.stdout.write(str(self.intro)+\"\\n\") stop = None while not stop: if self.cmdqueue: line", "print_topics(self, header, cmds, cmdlen, maxcol): if cmds: self.stdout.write(\"%s\\n\"%str(header)) if self.ruler:", "names.sort() # There can be duplicates if routines overridden prevname", "len(list) if size == 1: self.stdout.write('%s\\n'%str(list[0])) return # Try every", "n = 0, len(line) while i < n and line[i]", "try to call complete_<command> to get list of completions. \"\"\"", "= \"\" else: x = list[i] texts.append(x) while texts and", "argument 'completekey' is the readline name of a completion key;", "if i >= size: x = \"\" else: x =", "line-oriented command interpreters. Interpreters constructed with this class obey the", "been typed in response to the prompt. This may be", "range(len(list)) if not isinstance(list[i], str)] if nonstrings: raise TypeError(\"list[i] not", "self.stdin, self.stdout, sys.stderr) self.editline.rl_completer = self.complete except ImportError: print(\"Failed to", "set the headers used for the help function's listings of", "interpreted, but after the input prompt is generated and issued.", "repeats the last nonempty command entered. \"\"\" if self.lastcmd: return", "and 'args' may be None if the line couldn't be", "By default, it returns an empty list. \"\"\" return []", "TypeError(\"list[i] not a string for i in %s\" % \",", "the help messages. If empty, no ruler line is drawn.", "duplicates if routines overridden prevname = '' for name in", "The data members `self.doc_header', `self.misc_header', and `self.undoc_header' set the headers", "else: return None, None, line i, n = 0, len(line)", "command interpreters. 
These are often useful for test harnesses, administrative", "is finished.\"\"\" return stop def preloop(self): \"\"\"Hook method executed once", "1: self.stdout.write('%s\\n'%str(list[0])) return # Try every row count from 1", "do_blob(self, s): print(\"blob!\") def do_bob(self, s): print(\"bob!\") def do_mods(self, s):", "*args): commands = set(self.completenames(*args)) topics = set(a[5:] for a in", "There is no good reason to instantiate Cmd itself; rather,", "texts[col].ljust(colwidths[col]) self.stdout.write(\"%s\\n\"%str(\" \".join(texts))) class MyCmd(ElCmd,object): def do_bleep(self, s): print(\"bleep!\") def", "complete_help(self, *args): commands = set(self.completenames(*args)) topics = set(a[5:] for a", "getattr(self, 'complete_' + cmd) except AttributeError: compfunc = self.completedefault else:", "prefix off the received input, and dispatch to action methods,", "2 if totwidth > displaywidth: break if totwidth <= displaywidth:", "return line def postcmd(self, stop, line): \"\"\"Hook method executed just", "try: import editline self.editline = editline.editline(\"CMD\", self.stdin, self.stdout, sys.stderr) self.editline.rl_completer", "stop, line): \"\"\"Hook method executed just after a command dispatch", "args, line). 'command' and 'args' may be None if the", "line = origline.lstrip() stripped = len(origline) - len(line) begidx =", "a synonym for `help'. The command '!' is a synonym", "completekey='tab', stdin=None, stdout=None): \"\"\"Instantiate a line-oriented interpreter framework. The optional", "try: if intro is not None: self.intro = intro if", "\"\"\"Parse the line into a command name and a string", "# XXX check arg syntax try: func = getattr(self, 'help_'", "AttributeError: compfunc = self.completedefault else: compfunc = self.completenames self.completion_matches =", "editline.editline(\"CMD\", self.stdin, self.stdout, sys.stderr) self.editline.rl_completer = self.complete except ImportError: print(\"Failed", "= line.strip() if not line: return None, None, line elif", "a subclass.) 5. There is a predefined `help' method. Given", "prompt = PROMPT identchars = IDENTCHARS ruler = '=' lastcmd", "`shell', if a do_shell method exists. 7. If completion is", "and line[i] in self.identchars: i = i+1 cmd, arg =", "command prefix is not recognized. If this method is not", "do_mods(self, s): print(sys.modules.keys()) if __name__ == '__main__': mc = MyCmd()", "miscellaneous help topics, and undocumented commands. 6. The command '?'", "intercept completions for commands that have no complete_ method. The", "and issued. \"\"\" return line def postcmd(self, stop, line): \"\"\"Hook", "for col in range(len(texts)): texts[col] = texts[col].ljust(colwidths[col]) self.stdout.write(\"%s\\n\"%str(\" \".join(texts))) class", "else: names = self.get_names() cmds_doc = [] cmds_undoc = []", "beginning and end indexes of the text being matched, which", "self.cmdqueue: line = self.cmdqueue.pop(0) else: if self.use_rawinput: try: line =", "None: self.stdin = stdin else: self.stdin = sys.stdin if stdout", "more sophisticated interface. 
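A minimal illustrative subclass (hypothetical command name) shows the
intended usage pattern:

    class Greeter(ElCmd):
        def do_greet(self, arg):
            'Greet the given name.'
            print("hello", arg)

Typing "greet world" at the prompt dispatches to do_greet("world"), and
"help greet" prints that method's docstring.
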
A Cmd instance or subclass instance is", "the headers used for the help function's listings of documented", "= False def __init__(self, completekey='tab', stdin=None, stdout=None): \"\"\"Instantiate a line-oriented", "self.emptyline() if cmd is None: return self.default(line) self.lastcmd = line", "== 0: origline = self.editline.get_line_buffer() line = origline.lstrip() stripped =", "= self.get_names() cmds_doc = [] cmds_undoc = [] help =", "'=' lastcmd = '' intro = None doc_leader = \"\"", "to draw separator lines in the help messages. If empty,", "encapsulate action methods. \"\"\" prompt = PROMPT identchars = IDENTCHARS", "may be overridden in a subclass.) 5. There is a", "method is called, it is printed out on interpreter startup.", "single argument consisting of the remainder of the line. 4.", "once when the cmdloop() method is called.\"\"\" pass def postloop(self):", "line repeats the last command. (Actually, it calls the method", "editline\") pass def cmdloop(self, intro=None): \"\"\"Repeatedly issue a prompt, accept", "PROMPT identchars = IDENTCHARS ruler = '=' lastcmd = ''", "only as wide as necessary. Columns are separated by two", "If this method is not overridden, it repeats the last", "begidx, endidx. text is string we are matching against, all", "return [] def completenames(self, text, *ignored): dotext = 'do_'+text return", "is not overridden, it prints an error message and returns.", "name in names: if name[:3] == 'do_': if name ==", "nrows*col if i >= size: break x = list[i] colwidth", "current input line (lstripped), begidx and endidx are the beginning", "columnize(self, list, displaywidth=80): \"\"\"Display a list of strings as a", "a more sophisticated interface. A Cmd instance or subclass instance", "member `self.ruler' sets the character used to draw separator lines", "matches must begin with it. line is the current input", "string containing the arguments. Returns a tuple containing (command, args,", "we are matching against, all returned matches must begin with", "but should not normally need to be; see the precmd()", "cmds_undoc.append(cmd) self.stdout.write(\"%s\\n\"%str(self.doc_leader)) self.print_topics(self.doc_header, cmds_doc, 15,80) self.print_topics(self.misc_header, list(help.keys()),15,80) self.print_topics(self.undoc_header, cmds_undoc, 15,80)", "elif line[0] == '!': if hasattr(self, 'do_shell'): line = 'shell", "remainder of the line as argument. \"\"\" self.preloop() try: if", "= row + nrows*col if i >= size: break x", "def postcmd(self, stop, line): \"\"\"Hook method executed just after a", "the command `help_topic'. With no arguments, it lists all topics", "the last command. (Actually, it calls the method `emptyline', which", "you define yourself in order to inherit Cmd's methods and", "\".join(texts))) class MyCmd(ElCmd,object): def do_bleep(self, s): print(\"bleep!\") def do_blob(self, s):", "cmds_doc = [] cmds_undoc = [] help = {} for", "for `shell', if a do_shell method exists. 7. If completion", "arg = line[:i], line[i:].strip() return cmd, arg, line def onecmd(self,", "by two spaces (one was not legible enough). \"\"\" if", "two spaces (one was not legible enough). \"\"\" if not", "\"\"\" prompt = PROMPT identchars = IDENTCHARS ruler = '='", "to the prompt. 
If this method is not overridden, it", "arg: # XXX check arg syntax try: func = getattr(self,", "broken into up to three topics; documented commands, miscellaneous help", "for row in range(nrows): texts = [] for col in", "return value is a flag indicating whether interpretation of commands", "draw separator lines in the help messages. If empty, no", "self.prompt line = self.editline.readline() if not len(line): line = 'EOF'", "commands that have no complete_ method. The data member `self.ruler'", "called.\"\"\" pass def postloop(self): \"\"\"Hook method executed once when the", "len(line) while i < n and line[i] in self.identchars: i", "dotext = 'do_'+text return [a[3:] for a in self.get_names() if", "+ 2 if totwidth > displaywidth: break if totwidth <=", "functions, miscellaneous topics, and undocumented functions respectively. \"\"\" import string,", "None while not stop: if self.cmdqueue: line = self.cmdqueue.pop(0) else:", "lists all topics with defined help_ functions, broken into up", "def complete(self, text, state): \"\"\"Return the next possible completion for", "Returns a tuple containing (command, args, line). 'command' and 'args'", "remainder of the line. 4. Typing an empty line repeats", "editline self.editline = editline.editline(\"CMD\", self.stdin, self.stdout, sys.stderr) self.editline.rl_completer = self.complete", "range(nrows): texts = [] for col in range(ncols): i =", "subclass instance is a line-oriented interpreter framework. There is no", "a in self.get_names() if a.startswith(dotext)] def complete(self, text, state): \"\"\"Return", "argument as though it had been typed in response to", "args[0])) return list(commands | topics) def do_help(self, arg): 'List available", "'EOF'. 2. A command is parsed out of each line", "is about to return. \"\"\" pass def parseline(self, line): \"\"\"Parse", "= [] cmds_undoc = [] help = {} for name", ">= size: break x = list[i] colwidth = max(colwidth, len(x))", "texts[-1]: del texts[-1] for col in range(len(texts)): texts[col] = texts[col].ljust(colwidths[col])", "= self.parseline(line) if cmd == '': compfunc = self.completedefault else:", "'completekey' is the readline name of a completion key; it", "is None: return self.default(line) self.lastcmd = line if line ==", "time dir() didn't do it yet. return dir(self.__class__) def complete_help(self,", "try: func = getattr(self, 'do_' + cmd) except AttributeError: return", "intro = None doc_leader = \"\" doc_header = \"Documented commands", "an empty line repeats the last command. (Actually, it calls", "of columns. Each column is only as wide as necessary.", "line = line.rstrip('\\r\\n') line = self.precmd(line) stop = self.onecmd(line) stop", "return None, None, line elif line[0] == '?': line =", "see the precmd() and postcmd() methods for useful execution hooks.", "% \", \".join(map(str, nonstrings))) size = len(list) if size ==", "specified, sys.stdin and sys.stdout are used. \"\"\" if stdin is", "order to inherit Cmd's methods and encapsulate action methods. \"\"\"", "return cmd, arg, line def onecmd(self, line): \"\"\"Interpret the argument", "had been typed in response to the prompt. This may", "text is string we are matching against, all returned matches", "accept input, parse an initial prefix off the received input,", "are the beginning and end indexes of the text being", "method. 
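For example (illustrative), the startup banner can be supplied either way:

    interp = MyCmd()
    interp.intro = "Type 'help' to list commands."
    interp.cmdloop()        # or equivalently: interp.cmdloop(intro="Type 'help' ...")
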
The data members `self.doc_header', `self.misc_header', and `self.undoc_header' set the", "colwidths.append(colwidth) totwidth += colwidth + 2 if totwidth > displaywidth:", "= [] help = {} for name in names: if", "self.editline = editline.editline(\"CMD\", self.stdin, self.stdout, sys.stderr) self.editline.rl_completer = self.complete except", "not overridden, it prints an error message and returns. \"\"\"", "file objects; if not specified, sys.stdin and sys.stdout are used.", "+ line[1:] elif line[0] == '!': if hasattr(self, 'do_shell'): line", "method is available. By default, it returns an empty list.", "input, and dispatch to action methods, passing them the remainder", "wide as necessary. Columns are separated by two spaces (one", "foo = self.parseline(line) if cmd == '': compfunc = self.completedefault", "row + nrows*col if i >= size: x = \"\"", "overridden in a subclass.) 5. There is a predefined `help'", "do it yet. return dir(self.__class__) def complete_help(self, *args): commands =", "the remainder of the line as argument. \"\"\" self.preloop() try:", "of documented functions, miscellaneous topics, and undocumented functions respectively. \"\"\"", "\"\"\" return [] def completenames(self, text, *ignored): dotext = 'do_'+text", "line if line == 'EOF' : print(\"\") print(\"Bye\") sys.exit(0) if", "self.completedefault else: compfunc = self.completenames self.completion_matches = compfunc(text, line, begidx,", "def complete_help(self, *args): commands = set(self.completenames(*args)) topics = set(a[5:] for", "line when the command prefix is not recognized. If this", "line as argument. \"\"\" self.preloop() try: if intro is not", "the command line is interpreted, but after the input prompt", "self.print_topics(self.doc_header, cmds_doc, 15,80) self.print_topics(self.misc_header, list(help.keys()),15,80) self.print_topics(self.undoc_header, cmds_undoc, 15,80) def print_topics(self,", "intro=None): \"\"\"Repeatedly issue a prompt, accept input, parse an initial", "if size == 1: self.stdout.write('%s\\n'%str(list[0])) return # Try every row", "colwidths = [] totwidth = -2 for col in range(ncols):", "a flag indicating whether interpretation of commands by the interpreter", "by calling complete_foo() with arguments text, line, begidx, endidx. text", "+ args[0])) return list(commands | topics) def do_help(self, arg): 'List", "of the line. 4. Typing an empty line repeats the", "self.postloop() finally: pass def precmd(self, line): \"\"\"Hook method executed just", "\"\"\"Called on an input line when the command prefix is", "interpreters. These are often useful for test harnesses, administrative tools,", "as wide as necessary. Columns are separated by two spaces", "line by collecting the prefix composed of characters in the", "nonstrings = [i for i in range(len(list)) if not isinstance(list[i],", "called to complete an input line when no command-specific complete_*()", "legible enough). \"\"\" if not list: self.stdout.write(\"<empty>\\n\") return nonstrings =", "the readline module is available, command completion is done automatically.", "+= colwidth + 2 if totwidth > displaywidth: break if", "is interpreted, but after the input prompt is generated and", "header, cmds, cmdlen, maxcol): if cmds: self.stdout.write(\"%s\\n\"%str(header)) if self.ruler: self.stdout.write(\"%s\\n\"%str(self.ruler", "= sys.stdin if stdout is not None: self.stdout = stdout", "not recognized. 
"""A generic class to build line-oriented command interpreters.

Interpreters constructed with this class obey the following conventions:

1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
   of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
   is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command.  (Actually, it calls the
   method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method.  Given an argument `topic', it
   calls the command `help_topic'.  With no arguments, it lists all topics
   with defined help_ functions, broken into up to three topics; documented
   commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'.  The command '!' is a synonym
   for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
   and completing of commands args is done by calling complete_foo() with
   arguments text, line, begidx, endidx.  text is string we are matching
   against, all returned matches must begin with it.  line is the current
   input line (lstripped), begidx and endidx are the beginning and end
   indexes of the text being matched, which could be used to provide
   different completion depending upon which position the argument is in.

The `default' method may be overridden to intercept commands for which there
is no do_ method.

The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.

The data member `self.ruler' sets the character used to draw separator lines
in the help messages.  If empty, no ruler is drawn.  It defaults to "=".

If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup.  This value may be overridden via
an optional argument to the cmdloop() method.

The data members `self.doc_header', `self.misc_header', and `self.undoc_header'
set the headers used for the help function's listings of documented functions,
miscellaneous topics, and undocumented functions respectively.
"""

import string, sys

__all__ = ["Cmd"]

PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'


class ElCmd:
    """A simple framework for writing line-oriented command interpreters.
    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.

    """
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='
    lastcmd = ''
    intro = None
    doc_leader = ""
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = False

    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.

        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key.  If completekey is
        not None and the readline module is available, command completion
        is done automatically.  The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.

        """
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []
        self.completekey = completekey
        if not self.use_rawinput and self.completekey:
            try:
                import editline
                self.editline = editline.editline("CMD",
                                                  self.stdin, self.stdout, sys.stderr)
                self.editline.rl_completer = self.complete
            except ImportError:
                print("Failed to import editline")
                pass

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.

        """
        self.preloop()
        try:
            if intro is not None:
                self.intro = intro
            stop = None
            while not stop:
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = input(self.prompt)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.editline.prompt = self.prompt
                        line = self.editline.readline()
                        if not len(line):
                            line = 'EOF'
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            pass

    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.

        """
        return line

    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass

    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.

        """
        pass

    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            line = 'help ' + line[1:]
        elif line[0] == '!':
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line

    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.

        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.

        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if line == 'EOF':
            print("")
            print("Bye")
            sys.exit(0)
        if cmd == '':
            return self.default(line)
        else:
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)

    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.

        If this method is not overridden, it repeats the last nonempty
        command entered.

        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)

    def default(self, line):
        """Called on an input line when the command prefix is not recognized.

        If this method is not overridden, it prints an error message and
        returns.

        """
        self.stdout.write('*** Unknown syntax: %s (%d)\n' % (line, len(line)))

    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.

        By default, it returns an empty list.

        """
        return []

    def completenames(self, text, *ignored):
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            origline = self.editline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = self.editline.get_begidx() - stripped
            endidx = self.editline.get_endidx() - stripped
            if begidx > 0:
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None

    def get_names(self):
        # This method used to pull in base class attributes
        # at a time dir() didn't do it yet.
        return dir(self.__class__)

    def complete_help(self, *args):
        commands = set(self.completenames(*args))
        topics = set(a[5:] for a in self.get_names()
                     if a.startswith('help_' + args[0]))
        return list(commands | topics)

    def do_help(self, arg):
        'List available commands with "help" or detailed help with "help cmd".'
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                try:
                    doc = getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n" % str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n" % str(self.nohelp % (arg,)))
                return
            func()
        else:
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]] = 1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd = name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n" % str(self.doc_leader))
            self.print_topics(self.doc_header, cmds_doc, 15, 80)
            self.print_topics(self.misc_header, list(help.keys()), 15, 80)
            self.print_topics(self.undoc_header, cmds_undoc, 15, 80)

    def print_topics(self, header, cmds, cmdlen, maxcol):
        if cmds:
            self.stdout.write("%s\n" % str(header))
            if self.ruler:
                self.stdout.write("%s\n" % str(self.ruler * len(header)))
            self.columnize(cmds, maxcol - 1)
            self.stdout.write("\n")

    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.

        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return

        nonstrings = [i for i in range(len(list))
                      if not isinstance(list[i], str)]
        if nonstrings:
            raise TypeError("list[i] not a string for i in %s"
                            % ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n' % str(list[0]))
            return
        # Try every row count from 1 upwards
        for nrows in range(1, len(list)):
            ncols = (size + nrows - 1) // nrows
            colwidths = []
            totwidth = -2
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = row + nrows * col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows * col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                texts[col] = texts[col].ljust(colwidths[col])
            self.stdout.write("%s\n" % str("  ".join(texts)))


class MyCmd(ElCmd, object):
    def do_bleep(self, s):
        print("bleep!")

    def do_blob(self, s):
        print("blob!")

    def do_bob(self, s):
        print("bob!")

    def do_mods(self, s):
        print(sys.modules.keys())


if __name__ == '__main__':
    mc = MyCmd()
    mc.cmdloop()
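# --- Illustrative sketch (not from the original module) ---
# A minimal ElCmd subclass showing the do_*/help_*/complete_* conventions
# described in the module docstring above.  The command name 'greet', the
# completion candidates and the prompt are hypothetical; use_rawinput is set
# to True so the sketch also runs when the 'editline' package is missing.
class GreetCmd(ElCmd):
    use_rawinput = True      # fall back to plain input() instead of editline
    prompt = '(greet) '

    def do_greet(self, arg):
        """greet NAME -> print a greeting for NAME (or 'world')."""
        print("Hello, %s!" % (arg or "world"))

    def complete_greet(self, text, line, begidx, endidx):
        # Candidate names are assumptions; only those starting with 'text'
        # may be returned, as required by the complete() contract above.
        names = ['alice', 'bob', 'carol']
        return [n for n in names if n.startswith(text)]

    def do_quit(self, arg):
        """quit -> stop the command loop."""
        return True          # a truthy return value makes cmdloop() exit

# Expected interaction (assumed):
#   (greet) greet alice      ->  Hello, alice!
#   (greet) ?                ->  lists documented commands (greet, help, quit)
#   (greet) quit             ->  leaves the loop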
[ "list_display = ('user', 'code', 'total_price', 'shipping_status', 'created_at') list_display_links = ('user',)", "total_price(self, obj): return obj.cart.total_price def has_add_permission(self, request): return False @admin.register(receiverInfo)", "request): return False @admin.register(receiverInfo) class receiverInfoAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display", "= 25 search_fields = ('user__phone_number', 'user__email', 'code') readonly_fields = ('user','cart',", "OrderAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display = ('user', 'code', 'total_price', 'shipping_status',", "False @admin.register(receiverInfo) class receiverInfoAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display = ('id',", "@admin.register(receiverInfo) class receiverInfoAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display = ('id', 'full_name',", "obj): return obj.cart.total_price def has_add_permission(self, request): return False @admin.register(receiverInfo) class", "'full_name') list_filter = ('created_at',) list_per_page = 25 search_fields = ('full_name',", "= ('shipping_status', 'payment_mode', 'created_at') list_per_page = 25 search_fields = ('user__phone_number',", "obj.cart.total_price def has_add_permission(self, request): return False @admin.register(receiverInfo) class receiverInfoAdmin(admin.ModelAdmin): date_hierarchy", ".models import Order, receiverInfo @admin.register(Order) class OrderAdmin(admin.ModelAdmin): date_hierarchy = 'created_at'", "receiverInfoAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display = ('id', 'full_name', 'phone_number', 'address',", "return False @admin.register(receiverInfo) class receiverInfoAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display =", "('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code') def total_price(self, obj): return obj.cart.total_price", "class OrderAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display = ('user', 'code', 'total_price',", "@admin.register(Order) class OrderAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display = ('user', 'code',", "25 search_fields = ('full_name', 'phone_number', 'address') readonly_fields = ('full_name', 'phone_number',", "'shipping_status', 'code') def total_price(self, obj): return obj.cart.total_price def has_add_permission(self, request):", "'payment_mode', 'created_at') list_per_page = 25 search_fields = ('user__phone_number', 'user__email', 'code')", "'code', 'total_price', 'shipping_status', 'created_at') list_display_links = ('user',) list_editable = ('shipping_status',)", "= 'created_at' list_display = ('user', 'code', 'total_price', 'shipping_status', 'created_at') list_display_links", "'created_at') list_display_links = ('id', 'full_name') list_filter = ('created_at',) list_per_page =", "date_hierarchy = 'created_at' list_display = ('id', 'full_name', 'phone_number', 'address', 'created_at')", "= 25 search_fields = ('full_name', 'phone_number', 'address') readonly_fields = ('full_name',", "has_add_permission(self, request): return False @admin.register(receiverInfo) class receiverInfoAdmin(admin.ModelAdmin): date_hierarchy = 'created_at'", "= ('user',) list_editable = ('shipping_status',) list_filter = ('shipping_status', 'payment_mode', 'created_at')", "return obj.cart.total_price def has_add_permission(self, request): return False @admin.register(receiverInfo) class receiverInfoAdmin(admin.ModelAdmin):", "= 'created_at' list_display = ('id', 'full_name', 
'phone_number', 'address', 'created_at') list_display_links", "'phone_number', 'address', 'created_at') list_display_links = ('id', 'full_name') list_filter = ('created_at',)", "list_filter = ('shipping_status', 'payment_mode', 'created_at') list_per_page = 25 search_fields =", "receiverInfo @admin.register(Order) class OrderAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display = ('user',", "= ('created_at',) list_per_page = 25 search_fields = ('full_name', 'phone_number', 'address')", "class receiverInfoAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display = ('id', 'full_name', 'phone_number',", "'user__email', 'code') readonly_fields = ('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code') def", "'code') def total_price(self, obj): return obj.cart.total_price def has_add_permission(self, request): return", "= ('user', 'code', 'total_price', 'shipping_status', 'created_at') list_display_links = ('user',) list_editable", "('shipping_status',) list_filter = ('shipping_status', 'payment_mode', 'created_at') list_per_page = 25 search_fields", "('user__phone_number', 'user__email', 'code') readonly_fields = ('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code')", "date_hierarchy = 'created_at' list_display = ('user', 'code', 'total_price', 'shipping_status', 'created_at')", "'full_name', 'phone_number', 'address', 'created_at') list_display_links = ('id', 'full_name') list_filter =", "= ('id', 'full_name') list_filter = ('created_at',) list_per_page = 25 search_fields", "list_display = ('id', 'full_name', 'phone_number', 'address', 'created_at') list_display_links = ('id',", "list_display_links = ('id', 'full_name') list_filter = ('created_at',) list_per_page = 25", "'total_price', 'shipping_status', 'created_at') list_display_links = ('user',) list_editable = ('shipping_status',) list_filter", "('created_at',) list_per_page = 25 search_fields = ('full_name', 'phone_number', 'address') readonly_fields", "= ('id', 'full_name', 'phone_number', 'address', 'created_at') list_display_links = ('id', 'full_name')", "search_fields = ('user__phone_number', 'user__email', 'code') readonly_fields = ('user','cart', 'receiver', 'payment_mode',", "def total_price(self, obj): return obj.cart.total_price def has_add_permission(self, request): return False", "('id', 'full_name', 'phone_number', 'address', 'created_at') list_display_links = ('id', 'full_name') list_filter", "= ('user__phone_number', 'user__email', 'code') readonly_fields = ('user','cart', 'receiver', 'payment_mode', 'shipping_status',", "def has_add_permission(self, request): return False @admin.register(receiverInfo) class receiverInfoAdmin(admin.ModelAdmin): date_hierarchy =", "django.contrib import admin from .models import Order, receiverInfo @admin.register(Order) class", "import admin from .models import Order, receiverInfo @admin.register(Order) class OrderAdmin(admin.ModelAdmin):", "'payment_mode', 'shipping_status', 'code') def total_price(self, obj): return obj.cart.total_price def has_add_permission(self,", "('user',) list_editable = ('shipping_status',) list_filter = ('shipping_status', 'payment_mode', 'created_at') list_per_page", "from django.contrib import admin from .models import Order, receiverInfo @admin.register(Order)", "Order, receiverInfo @admin.register(Order) class OrderAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display =", "'created_at') list_per_page = 25 search_fields = ('user__phone_number', 'user__email', 'code') readonly_fields", 
"'created_at') list_display_links = ('user',) list_editable = ('shipping_status',) list_filter = ('shipping_status',", "search_fields = ('full_name', 'phone_number', 'address') readonly_fields = ('full_name', 'phone_number', 'address')", "25 search_fields = ('user__phone_number', 'user__email', 'code') readonly_fields = ('user','cart', 'receiver',", "list_filter = ('created_at',) list_per_page = 25 search_fields = ('full_name', 'phone_number',", "list_display_links = ('user',) list_editable = ('shipping_status',) list_filter = ('shipping_status', 'payment_mode',", "import Order, receiverInfo @admin.register(Order) class OrderAdmin(admin.ModelAdmin): date_hierarchy = 'created_at' list_display", "from .models import Order, receiverInfo @admin.register(Order) class OrderAdmin(admin.ModelAdmin): date_hierarchy =", "<reponame>Shanu85/FCS_Project from django.contrib import admin from .models import Order, receiverInfo", "'created_at' list_display = ('id', 'full_name', 'phone_number', 'address', 'created_at') list_display_links =", "readonly_fields = ('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code') def total_price(self, obj):", "list_per_page = 25 search_fields = ('full_name', 'phone_number', 'address') readonly_fields =", "'address', 'created_at') list_display_links = ('id', 'full_name') list_filter = ('created_at',) list_per_page", "('shipping_status', 'payment_mode', 'created_at') list_per_page = 25 search_fields = ('user__phone_number', 'user__email',", "admin from .models import Order, receiverInfo @admin.register(Order) class OrderAdmin(admin.ModelAdmin): date_hierarchy", "'receiver', 'payment_mode', 'shipping_status', 'code') def total_price(self, obj): return obj.cart.total_price def", "'code') readonly_fields = ('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code') def total_price(self,", "('user', 'code', 'total_price', 'shipping_status', 'created_at') list_display_links = ('user',) list_editable =", "'shipping_status', 'created_at') list_display_links = ('user',) list_editable = ('shipping_status',) list_filter =", "= ('shipping_status',) list_filter = ('shipping_status', 'payment_mode', 'created_at') list_per_page = 25", "= ('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code') def total_price(self, obj): return", "list_editable = ('shipping_status',) list_filter = ('shipping_status', 'payment_mode', 'created_at') list_per_page =", "'created_at' list_display = ('user', 'code', 'total_price', 'shipping_status', 'created_at') list_display_links =", "('id', 'full_name') list_filter = ('created_at',) list_per_page = 25 search_fields =", "list_per_page = 25 search_fields = ('user__phone_number', 'user__email', 'code') readonly_fields =" ]
[ "\"\"\" return self._size def insert(self, val): \"\"\" basic insertion method", "node before node at val \"\"\" new_node = Node(new_val) current", "self._size += 1 def append(self, val): \"\"\" appends node to", "current = current._next if current._next is None: raise ValueError(\"Data not", "val and we will need this \"\"\" return '<head> =>", "self.head._next while current._next is not None: if current._next.val == val:", "list\") def insert_after(self, val, new_val): \"\"\" inserts node after node", "this is where we can see the list\"\"\" def __len__(self):", "= current._next._next current._next = new_node self._size += 1 break current", "None self._size = 0 for item in reversed(iter): self.insert(item) def", "if current.val == val: new_node._next = current._next._next current._next = new_node", "val: new_node._next = current._next current._next = new_node self._size += 1", "to front of LL \"\"\" self.head = Node(val, self.head) self._size", "in reversed(iter): self.insert(item) def __repr__(self): \"\"\" assumes head will have", "not in list\") def insert_after(self, val, new_val): \"\"\" inserts node", "k): \"\"\" returns node at kth from end \"\"\" if", "None: current._next = current._next._next if current._next._next is None: current._next._next =", "0 for item in reversed(iter): self.insert(item) def __repr__(self): \"\"\" assumes", "current.val == val: new_node._next = current._next._next current._next = new_node self._size", "we can see the list\"\"\" def __len__(self): \"\"\" returns size", "= self.head._next while current._next is not None: current._next = current._next._next", "iter=[]): self.head = None self._size = 0 for item in", "is None: current._next._next = new_node new_node._next is None self._size +=", "node at val \"\"\" new_node = Node(new_val) current = self.head._next", "from node import Node class LinkedList: \"\"\" initializes LL \"\"\"", "current._next._next = new_node new_node._next is None self._size += 1 return", "in list\") def kth_from_end(self, k): \"\"\" returns node at kth", "at kth from end \"\"\" if self._size - k <", "if self._size - k < 0: raise AttributeError current =", "a val and we will need this \"\"\" return '<head>", "val): \"\"\" basic insertion method for adding to front of", "= self.head._next while current._next is not None: if current._next.val ==", "kth from end \"\"\" if self._size - k < 0:", "for item in reversed(iter): self.insert(item) def __repr__(self): \"\"\" assumes head", "LL \"\"\" return self._size def insert(self, val): \"\"\" basic insertion", "before node at val \"\"\" new_node = Node(new_val) current =", "new_val): \"\"\" inserts node after node at val \"\"\" new_node", "current._next if current._next is None: raise ValueError(\"Data not in list\")", "need this \"\"\" return '<head> => {}'.format(self.head.val) def __str__(self): \"\"\"", "is not None: current._next = current._next._next if current._next._next is None:", "self._size - k < 0: raise AttributeError current = self.head", "kth_from_end(self, k): \"\"\" returns node at kth from end \"\"\"", "range(self._size - k - 1): current = current._next return current", "if current._next._next is None: current._next._next = new_node new_node._next is None", "while current._next is not None: if current.val == val: new_node._next", "self.head = None self._size = 0 for item in reversed(iter):", "current._next is not None: if current._next.val == val: new_node._next =", "where we can see the list\"\"\" def __len__(self): \"\"\" returns", "\"\"\" assumes head will have a 
val and we will", "front of LL \"\"\" self.head = Node(val, self.head) self._size +=", "import Node class LinkedList: \"\"\" initializes LL \"\"\" def __init__(self,", "ValueError(\"Data not in list\") def insert_after(self, val, new_val): \"\"\" inserts", "'<head> => {}'.format(self.head.val) def __str__(self): \"\"\" this is where we", "inserts node after node at val \"\"\" new_node = Node(new_val)", "AttributeError current = self.head for i in range(self._size - k", "self.head = Node(val, self.head) self._size += 1 def append(self, val):", "is not None: if current._next.val == val: new_node._next = current._next", "method for adding to front of LL \"\"\" self.head =", "new_node._next def insert_before(self, val, new_val): \"\"\" inserts node before node", "\"\"\" returns node at kth from end \"\"\" if self._size", "in list\") def insert_after(self, val, new_val): \"\"\" inserts node after", "LL \"\"\" def __init__(self, iter=[]): self.head = None self._size =", "node at kth from end \"\"\" if self._size - k", "in range(self._size - k - 1): current = current._next return", "current._next = current._next._next if current._next._next is None: current._next._next = new_node", "\"\"\" def __init__(self, iter=[]): self.head = None self._size = 0", "current._next is None: raise ValueError(\"Data not in list\") def insert_after(self,", "this \"\"\" return '<head> => {}'.format(self.head.val) def __str__(self): \"\"\" this", "current._next = new_node self._size += 1 break current = current._next", "\"\"\" inserts node after node at val \"\"\" new_node =", "will need this \"\"\" return '<head> => {}'.format(self.head.val) def __str__(self):", "raise ValueError(\"Data not in list\") def kth_from_end(self, k): \"\"\" returns", "not in list\") def kth_from_end(self, k): \"\"\" returns node at", "\"\"\" if self._size - k < 0: raise AttributeError current", "1 def append(self, val): \"\"\" appends node to the end", "= current._next._next if current._next._next is None: current._next._next = new_node new_node._next", "- k < 0: raise AttributeError current = self.head for", "None: if current._next.val == val: new_node._next = current._next current._next =", "for adding to front of LL \"\"\" self.head = Node(val,", "\"\"\" appends node to the end of the LL \"\"\"", "return self._size def insert(self, val): \"\"\" basic insertion method for", "= self.head for i in range(self._size - k - 1):", "0: raise AttributeError current = self.head for i in range(self._size", "of LL \"\"\" self.head = Node(val, self.head) self._size += 1", "adding to front of LL \"\"\" self.head = Node(val, self.head)", "self._size += 1 return new_node._next def insert_before(self, val, new_val): \"\"\"", "after node at val \"\"\" new_node = Node(new_val) current =", "list\") def kth_from_end(self, k): \"\"\" returns node at kth from", "\"\"\" return '<head> => {}'.format(self.head.val) def __str__(self): \"\"\" this is", "val \"\"\" new_node = Node(new_val) current = self.head._next while current._next", "is not None: if current.val == val: new_node._next = current._next._next", "the LL \"\"\" new_node = Node(val, None) current = self.head._next", "not None: current._next = current._next._next if current._next._next is None: current._next._next", "class LinkedList: \"\"\" initializes LL \"\"\" def __init__(self, iter=[]): self.head", "i in range(self._size - k - 1): current = current._next", "insert_before(self, val, new_val): \"\"\" inserts node before node at val", "new_node = Node(new_val) current = self.head._next while 
current._next is not", "initializes LL \"\"\" def __init__(self, iter=[]): self.head = None self._size", "raise ValueError(\"Data not in list\") def insert_after(self, val, new_val): \"\"\"", "self._size += 1 break current = current._next if current._next is", "None: if current.val == val: new_node._next = current._next._next current._next =", "self._size def insert(self, val): \"\"\" basic insertion method for adding", "at val \"\"\" new_node = Node(new_val) current = self.head._next while", "None self._size += 1 return new_node._next def insert_before(self, val, new_val):", "def __repr__(self): \"\"\" assumes head will have a val and", "return new_node._next def insert_before(self, val, new_val): \"\"\" inserts node before", "def insert_before(self, val, new_val): \"\"\" inserts node before node at", "< 0: raise AttributeError current = self.head for i in", "\"\"\" initializes LL \"\"\" def __init__(self, iter=[]): self.head = None", "self._size = 0 for item in reversed(iter): self.insert(item) def __repr__(self):", "have a val and we will need this \"\"\" return", "node to the end of the LL \"\"\" new_node =", "return '<head> => {}'.format(self.head.val) def __str__(self): \"\"\" this is where", "insertion method for adding to front of LL \"\"\" self.head", "+= 1 def append(self, val): \"\"\" appends node to the", "if current._next is None: raise ValueError(\"Data not in list\") def", "1 break current = current._next if current._next is None: raise", "+= 1 break current = current._next if current._next is None:", "None: current._next._next = new_node new_node._next is None self._size += 1", "current = self.head for i in range(self._size - k -", "<gh_stars>0 from node import Node class LinkedList: \"\"\" initializes LL", "=> {}'.format(self.head.val) def __str__(self): \"\"\" this is where we can", "current._next current._next = new_node self._size += 1 break current =", "size of LL \"\"\" return self._size def insert(self, val): \"\"\"", "new_node._next = current._next._next current._next = new_node self._size += 1 break", "val: new_node._next = current._next._next current._next = new_node self._size += 1", "def append(self, val): \"\"\" appends node to the end of", "\"\"\" this is where we can see the list\"\"\" def", "new_node new_node._next is None self._size += 1 return new_node._next def", "returns node at kth from end \"\"\" if self._size -", "is None: raise ValueError(\"Data not in list\") def kth_from_end(self, k):", "current = self.head._next while current._next is not None: current._next =", "will have a val and we will need this \"\"\"", "val, new_val): \"\"\" inserts node after node at val \"\"\"", "None: raise ValueError(\"Data not in list\") def kth_from_end(self, k): \"\"\"", "current._next._next if current._next._next is None: current._next._next = new_node new_node._next is", "\"\"\" returns size of LL \"\"\" return self._size def insert(self,", "node import Node class LinkedList: \"\"\" initializes LL \"\"\" def", "self.insert(item) def __repr__(self): \"\"\" assumes head will have a val", "LinkedList: \"\"\" initializes LL \"\"\" def __init__(self, iter=[]): self.head =", "== val: new_node._next = current._next._next current._next = new_node self._size +=", "def insert(self, val): \"\"\" basic insertion method for adding to", "append(self, val): \"\"\" appends node to the end of the", "break current = current._next if current._next is None: raise ValueError(\"Data", "end of the LL \"\"\" new_node = Node(val, None) current", "end \"\"\" if self._size - k < 0: raise 
AttributeError", "def kth_from_end(self, k): \"\"\" returns node at kth from end", "can see the list\"\"\" def __len__(self): \"\"\" returns size of", "self.head._next while current._next is not None: current._next = current._next._next if", "insert_after(self, val, new_val): \"\"\" inserts node after node at val", "\"\"\" new_node = Node(val, None) current = self.head._next while current._next", "Node class LinkedList: \"\"\" initializes LL \"\"\" def __init__(self, iter=[]):", "LL \"\"\" self.head = Node(val, self.head) self._size += 1 def", "= Node(val, None) current = self.head._next while current._next is not", "current = self.head._next while current._next is not None: if current._next.val", "= current._next current._next = new_node self._size += 1 break current", "\"\"\" self.head = Node(val, self.head) self._size += 1 def append(self,", "k < 0: raise AttributeError current = self.head for i", "is None self._size += 1 return new_node._next def insert_before(self, val,", "returns size of LL \"\"\" return self._size def insert(self, val):", "current._next._next current._next = new_node self._size += 1 break current =", "__len__(self): \"\"\" returns size of LL \"\"\" return self._size def", "__repr__(self): \"\"\" assumes head will have a val and we", "== val: new_node._next = current._next current._next = new_node self._size +=", "= Node(new_val) current = self.head._next while current._next is not None:", "while current._next is not None: current._next = current._next._next if current._next._next", "from end \"\"\" if self._size - k < 0: raise", "the end of the LL \"\"\" new_node = Node(val, None)", "is where we can see the list\"\"\" def __len__(self): \"\"\"", "__str__(self): \"\"\" this is where we can see the list\"\"\"", "= new_node new_node._next is None self._size += 1 return new_node._next", "Node(val, self.head) self._size += 1 def append(self, val): \"\"\" appends", "and we will need this \"\"\" return '<head> => {}'.format(self.head.val)", "the list\"\"\" def __len__(self): \"\"\" returns size of LL \"\"\"", "def __str__(self): \"\"\" this is where we can see the", "None: raise ValueError(\"Data not in list\") def insert_after(self, val, new_val):", "LL \"\"\" new_node = Node(val, None) current = self.head._next while", "\"\"\" basic insertion method for adding to front of LL", "raise AttributeError current = self.head for i in range(self._size -", "current._next is not None: if current.val == val: new_node._next =", "def insert_after(self, val, new_val): \"\"\" inserts node after node at", "new_node._next is None self._size += 1 return new_node._next def insert_before(self,", "val, new_val): \"\"\" inserts node before node at val \"\"\"", "def __init__(self, iter=[]): self.head = None self._size = 0 for", "for i in range(self._size - k - 1): current =", "is None: raise ValueError(\"Data not in list\") def insert_after(self, val,", "None) current = self.head._next while current._next is not None: current._next", "item in reversed(iter): self.insert(item) def __repr__(self): \"\"\" assumes head will", "self.head._next while current._next is not None: if current.val == val:", "insert(self, val): \"\"\" basic insertion method for adding to front", "current._next.val == val: new_node._next = current._next current._next = new_node self._size", "= Node(val, self.head) self._size += 1 def append(self, val): \"\"\"", "node after node at val \"\"\" new_node = Node(new_val) current", "Node(new_val) current = self.head._next while current._next is not None: if", "basic insertion 
method for adding to front of LL \"\"\"", "new_node self._size += 1 break current = current._next if current._next", "not None: if current._next.val == val: new_node._next = current._next current._next", "head will have a val and we will need this", "appends node to the end of the LL \"\"\" new_node", "list\"\"\" def __len__(self): \"\"\" returns size of LL \"\"\" return", "Node(val, None) current = self.head._next while current._next is not None:", "ValueError(\"Data not in list\") def kth_from_end(self, k): \"\"\" returns node", "assumes head will have a val and we will need", "= 0 for item in reversed(iter): self.insert(item) def __repr__(self): \"\"\"", "of the LL \"\"\" new_node = Node(val, None) current =", "see the list\"\"\" def __len__(self): \"\"\" returns size of LL", "= None self._size = 0 for item in reversed(iter): self.insert(item)", "of LL \"\"\" return self._size def insert(self, val): \"\"\" basic", "inserts node before node at val \"\"\" new_node = Node(new_val)", "1 return new_node._next def insert_before(self, val, new_val): \"\"\" inserts node", "__init__(self, iter=[]): self.head = None self._size = 0 for item", "{}'.format(self.head.val) def __str__(self): \"\"\" this is where we can see", "new_val): \"\"\" inserts node before node at val \"\"\" new_node", "val): \"\"\" appends node to the end of the LL", "= self.head._next while current._next is not None: if current.val ==", "to the end of the LL \"\"\" new_node = Node(val,", "current._next._next is None: current._next._next = new_node new_node._next is None self._size", "= current._next if current._next is None: raise ValueError(\"Data not in", "not None: if current.val == val: new_node._next = current._next._next current._next", "new_node._next = current._next current._next = new_node self._size += 1 break", "current._next is None: raise ValueError(\"Data not in list\") def kth_from_end(self,", "reversed(iter): self.insert(item) def __repr__(self): \"\"\" assumes head will have a", "\"\"\" new_node = Node(new_val) current = self.head._next while current._next is", "current._next is not None: current._next = current._next._next if current._next._next is", "def __len__(self): \"\"\" returns size of LL \"\"\" return self._size", "we will need this \"\"\" return '<head> => {}'.format(self.head.val) def", "if current._next.val == val: new_node._next = current._next current._next = new_node", "= new_node self._size += 1 break current = current._next if", "new_node = Node(val, None) current = self.head._next while current._next is", "+= 1 return new_node._next def insert_before(self, val, new_val): \"\"\" inserts", "current = self.head._next while current._next is not None: if current.val", "while current._next is not None: if current._next.val == val: new_node._next", "self.head for i in range(self._size - k - 1): current", "self.head) self._size += 1 def append(self, val): \"\"\" appends node", "\"\"\" inserts node before node at val \"\"\" new_node =" ]
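# --- Illustrative sketch (assumption, not from the repository) ---
# The LinkedList above does 'from node import Node' and calls Node(val),
# Node(val, None) and Node(val, self.head), then reads .val and ._next.
# A minimal node module satisfying those calls could look like this:
class Node:
    def __init__(self, val, _next=None):
        self.val = val        # payload stored in this node
        self._next = _next    # following node, or None at the tail

# Small usage example (assumed):
#   ll = LinkedList([1, 2, 3])   # insert() pushes to the front; reversed()
#                                # in __init__ keeps 1 at the head
#   len(ll)                      # -> 3
#   repr(ll)                     # -> '<head> => 1'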
[ "# ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.poolDBESSource", "input = cms.untracked.int32(1) ) process.poolDBESSource = cms.ESSource(\"PoolDBESSource\", BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),", "as cms process = cms.Process(\"LIKELIHOODPDFDBREADER\") # process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source = cms.Source(\"EmptySource\",", "= cms.untracked.string('runnumber'), connect = cms.string('sqlite_file:dummy2.db'), toGet = cms.VPSet(cms.PSet( record =", "cms.untracked.PSet( input = cms.untracked.int32(1) ) process.poolDBESSource = cms.ESSource(\"PoolDBESSource\", BlobStreamerName =", "process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") # process.source = cms.Source(\"PoolSource\", # fileNames", "# process.source = cms.Source(\"PoolSource\", # fileNames = cms.untracked.vstring() # )", "cms.untracked.vstring() # ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )", "= cms.string('MuScleFitLikelihoodPdf_2_1_12') )) ) process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer( \"LikelihoodPdfDBReader\" ) process.p1", "process.poolDBESSource = cms.ESSource(\"PoolDBESSource\", BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'), DBParameters = cms.PSet( messageLevel", "= cms.untracked.int32(2), authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype = cms.untracked.string('runnumber'), connect", "= cms.string('MuScleFitLikelihoodPdfRcd'), tag = cms.string('MuScleFitLikelihoodPdf_2_1_12') )) ) process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer(", "tag = cms.string('MuScleFitLikelihoodPdf_2_1_12') )) ) process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer( \"LikelihoodPdfDBReader\" )", "cms.PSet( messageLevel = cms.untracked.int32(2), authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype =", "cms process = cms.Process(\"LIKELIHOODPDFDBREADER\") # process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source = cms.Source(\"EmptySource\", numberEventsInRun", "record = cms.string('MuScleFitLikelihoodPdfRcd'), tag = cms.string('MuScleFitLikelihoodPdf_2_1_12') )) ) process.LikelihoodPdfDBReaderModule =", "cms.untracked.int32(2), authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype = cms.untracked.string('runnumber'), connect =", "= cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype = cms.untracked.string('runnumber'), connect = cms.string('sqlite_file:dummy2.db'), toGet", "= cms.string('sqlite_file:dummy2.db'), toGet = cms.VPSet(cms.PSet( record = cms.string('MuScleFitLikelihoodPdfRcd'), tag =", "= cms.untracked.uint32(1) ) process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") #", "FWCore.ParameterSet.Config as cms process = cms.Process(\"LIKELIHOODPDFDBREADER\") # process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source =", "connect = 
cms.string('sqlite_file:dummy2.db'), toGet = cms.VPSet(cms.PSet( record = cms.string('MuScleFitLikelihoodPdfRcd'), tag", ") process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") # process.source =", "process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") # process.source = cms.Source(\"PoolSource\",", "# fileNames = cms.untracked.vstring() # ) process.maxEvents = cms.untracked.PSet( input", ") process.poolDBESSource = cms.ESSource(\"PoolDBESSource\", BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'), DBParameters = cms.PSet(", "= cms.ESSource(\"PoolDBESSource\", BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'), DBParameters = cms.PSet( messageLevel =", "process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) )", "cms.ESSource(\"PoolDBESSource\", BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'), DBParameters = cms.PSet( messageLevel = cms.untracked.int32(2),", "numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\")", "= cms.PSet( messageLevel = cms.untracked.int32(2), authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype", "= cms.untracked.string('TBufferBlobStreamingService'), DBParameters = cms.PSet( messageLevel = cms.untracked.int32(2), authenticationPath =", "), timetype = cms.untracked.string('runnumber'), connect = cms.string('sqlite_file:dummy2.db'), toGet = cms.VPSet(cms.PSet(", "BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'), DBParameters = cms.PSet( messageLevel = cms.untracked.int32(2), authenticationPath", "cms.Process(\"LIKELIHOODPDFDBREADER\") # process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun", "process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1)", "# process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun =", "cms.string('MuScleFitLikelihoodPdfRcd'), tag = cms.string('MuScleFitLikelihoodPdf_2_1_12') )) ) process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer( \"LikelihoodPdfDBReader\"", "import FWCore.ParameterSet.Config as cms process = cms.Process(\"LIKELIHOODPDFDBREADER\") # process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source", "= cms.Source(\"PoolSource\", # 
fileNames = cms.untracked.vstring() # ) process.maxEvents =", "= cms.untracked.vstring() # ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1)", "cms.untracked.string('TBufferBlobStreamingService'), DBParameters = cms.PSet( messageLevel = cms.untracked.int32(2), authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')", "messageLevel = cms.untracked.int32(2), authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype = cms.untracked.string('runnumber'),", "process.source = cms.Source(\"PoolSource\", # fileNames = cms.untracked.vstring() # ) process.maxEvents", "cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype = cms.untracked.string('runnumber'), connect = cms.string('sqlite_file:dummy2.db'), toGet =", "authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype = cms.untracked.string('runnumber'), connect = cms.string('sqlite_file:dummy2.db'),", "cms.string('sqlite_file:dummy2.db'), toGet = cms.VPSet(cms.PSet( record = cms.string('MuScleFitLikelihoodPdfRcd'), tag = cms.string('MuScleFitLikelihoodPdf_2_1_12')", "cms.VPSet(cms.PSet( record = cms.string('MuScleFitLikelihoodPdfRcd'), tag = cms.string('MuScleFitLikelihoodPdf_2_1_12') )) ) process.LikelihoodPdfDBReaderModule", "process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") # process.source = cms.Source(\"PoolSource\", #", "= cms.Process(\"LIKELIHOODPDFDBREADER\") # process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1),", "fileNames = cms.untracked.vstring() # ) process.maxEvents = cms.untracked.PSet( input =", "process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.poolDBESSource = cms.ESSource(\"PoolDBESSource\",", ")) ) process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer( \"LikelihoodPdfDBReader\" ) process.p1 = cms.Path(process.LikelihoodPdfDBReaderModule)", "DBParameters = cms.PSet( messageLevel = cms.untracked.int32(2), authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ),", "cms.untracked.string('runnumber'), connect = cms.string('sqlite_file:dummy2.db'), toGet = cms.VPSet(cms.PSet( record = cms.string('MuScleFitLikelihoodPdfRcd'),", "= cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"Configuration.StandardSequences.MagneticField_cff\")", "= cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\")", "toGet = cms.VPSet(cms.PSet( record = cms.string('MuScleFitLikelihoodPdfRcd'), tag = cms.string('MuScleFitLikelihoodPdf_2_1_12') ))", "cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\")", "process = cms.Process(\"LIKELIHOODPDFDBREADER\") # 
process.load(\"MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff\") process.source = cms.Source(\"EmptySource\", numberEventsInRun =", "firstRun = cms.untracked.uint32(1) ) process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\")", "cms.string('MuScleFitLikelihoodPdf_2_1_12') )) ) process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer( \"LikelihoodPdfDBReader\" ) process.p1 =", "cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\")", "= cms.untracked.int32(1) ) process.poolDBESSource = cms.ESSource(\"PoolDBESSource\", BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'), DBParameters", "process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") # process.source = cms.Source(\"PoolSource\", # fileNames =", "process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") # process.source = cms.Source(\"PoolSource\", # fileNames = cms.untracked.vstring()", "process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") # process.source = cms.Source(\"PoolSource\", # fileNames = cms.untracked.vstring() #", ") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.poolDBESSource =", "cms.untracked.uint32(1) ) process.load(\"Configuration.StandardSequences.MagneticField_cff\") process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\") process.load(\"Geometry.CommonTopologies.globalTrackingGeometry_cfi\") process.load(\"RecoMuon.DetLayers.muonDetLayerGeometry_cfi\") process.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\") process.load(\"RecoMuon.TrackingTools.MuonServiceProxy_cff\") # process.source", "cms.untracked.int32(1) ) process.poolDBESSource = cms.ESSource(\"PoolDBESSource\", BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'), DBParameters =", "timetype = cms.untracked.string('runnumber'), connect = cms.string('sqlite_file:dummy2.db'), toGet = cms.VPSet(cms.PSet( record", "= cms.untracked.PSet( input = cms.untracked.int32(1) ) process.poolDBESSource = cms.ESSource(\"PoolDBESSource\", BlobStreamerName", "cms.Source(\"PoolSource\", # fileNames = cms.untracked.vstring() # ) process.maxEvents = cms.untracked.PSet(", "= cms.VPSet(cms.PSet( record = cms.string('MuScleFitLikelihoodPdfRcd'), tag = cms.string('MuScleFitLikelihoodPdf_2_1_12') )) )" ]
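
The list that closes above shingles what looks like a complete CMSSW FWCore python configuration for a "LikelihoodPdfDBReader" job that reads a MuScleFit likelihood PDF payload from a local SQLite conditions file. Stitching the overlapping fragments back together gives roughly the configuration below; every parameter name and value appears in the fragments, but the line ordering and indentation are assumptions.

import FWCore.ParameterSet.Config as cms

process = cms.Process("LIKELIHOODPDFDBREADER")
# process.load("MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff")

# Single empty event; the job only reads conditions, it does not process data.
process.source = cms.Source("EmptySource",
    numberEventsInRun = cms.untracked.uint32(1),
    firstRun = cms.untracked.uint32(1)
)

process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.CommonTopologies.globalTrackingGeometry_cfi")
process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff")

# process.source = cms.Source("PoolSource",
#     fileNames = cms.untracked.vstring()
# )

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)

# Read the likelihood PDF payload from a local SQLite conditions file.
process.poolDBESSource = cms.ESSource("PoolDBESSource",
    BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
    DBParameters = cms.PSet(
        messageLevel = cms.untracked.int32(2),
        authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
    ),
    timetype = cms.untracked.string('runnumber'),
    connect = cms.string('sqlite_file:dummy2.db'),
    toGet = cms.VPSet(cms.PSet(
        record = cms.string('MuScleFitLikelihoodPdfRcd'),
        tag = cms.string('MuScleFitLikelihoodPdf_2_1_12')
    ))
)

process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer(
    "LikelihoodPdfDBReader"
)

process.p1 = cms.Path(process.LikelihoodPdfDBReaderModule)

A configuration like this would normally be executed with CMSSW's cmsRun driver (cmsRun <config>.py); the sqlite_file:dummy2.db payload and the MuScleFitLikelihoodPdf_2_1_12 tag would have to exist locally for the ESSource to resolve.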
[ "lab = batch['labels'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask, labels=lab) loss =", "original_A, original_B, predicted_A, predicted_B def train_and_save(self, texts, labels, path, epochs=5,", "torch import nn class FastFineTuna: def __init__(self, model_name, tokenizer_name): self.model_name", "labels_A, labels_B, splits=5, epochs=5, batch_size=16, learning_rate=5e-5, ): tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)", "attention_mask = batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist()) del", "= [] predicted = [] for train_index, test_index in skf.split(texts,", "test_index in skf.split(texts, labels): model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) X_train, X_test", "loss_A = loss(outputs[0], lab_A) loss_B = loss(outputs[1], lab_B) loss =", "torch.cuda.is_available() else torch.device('cpu') def cross_validate_fit(self, texts, labels, splits=5, epochs=5, batch_size=16,", "tqdm(total=epochs, position=0, leave=True) for epoch in range(epochs): pbar.update(1) for batch", "truncation=True, padding=True) train_dataset = MainDataset(tokenized_train, labels) model.to(self.device) model.train() train_loader =", "labels_A = np.array(labels_A) labels_B = np.array(labels_B) skf = StratifiedKFold(n_splits=splits) original_A", "= labels_A[train_index].tolist(), labels_A[test_index].tolist() y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist() # not", "= loss(outputs[1], lab_B) loss = loss_A + loss_B loss.backward() optim.step()", "batch_size=16, learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)", "np.array(labels) skf = StratifiedKFold(n_splits=splits) original = [] predicted = []", "else torch.device('cpu') def cross_validate_fit(self, texts, labels, splits=5, epochs=5, batch_size=16, learning_rate=5e-5):", "loss(outputs[1], lab_B) loss = loss_A + loss_B loss.backward() optim.step() pbar.close()", "MainDatasetDouble, MainDataset from transformers import AdamW from torch.utils.data import DataLoader", "= AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_train, y_test", "import MiniModel from torch import nn class FastFineTuna: def __init__(self,", "attention_mask = batch['attention_mask'].to(self.device) lab_A = batch['labels_A'].to(self.device) lab_B = batch['labels_B'].to(self.device) outputs", "= MainDataset(tokenized_test, y_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)", "in skf.split(texts, labels_A, labels_B): model = MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B))) X_train,", "from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset from transformers import AdamW from", "splits=5, epochs=5, batch_size=16, learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") tokenizer", "= [] predicted_A = [] predicted_B = [] for train_index,", "torch.device('cpu') def cross_validate_fit(self, texts, labels, splits=5, epochs=5, batch_size=16, learning_rate=5e-5): config", "to do this, but faster to code up tokenized_train =", "= 
model(input_ids, attention_mask=attention_mask) loss = nn.CrossEntropyLoss() loss_A = loss(outputs[0], lab_A)", "original_B, predicted_A, predicted_B def train_and_save(self, texts, labels, path, epochs=5, batch_size=16,", "y_test = labels[train_index].tolist(), labels[test_index].tolist() # not the smartest way to", "= np.array(labels_A) labels_B = np.array(labels_B) skf = StratifiedKFold(n_splits=splits) original_A =", "= texts[train_index].tolist(), texts[test_index].tolist() y_train, y_test = labels[train_index].tolist(), labels[test_index].tolist() # not", "MainDatasetDouble(tokenized_train, y_A_train, y_B_train) test_dataset = MainDatasetDouble(tokenized_test, y_A_test, y_B_test) model.to(self.device) model.train()", "y_A_train, y_B_train) test_dataset = MainDatasetDouble(tokenized_test, y_A_test, y_B_test) model.to(self.device) model.train() train_loader", "train_dataset = MainDataset(tokenized_train, labels) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size,", "in loader: input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) outputs =", "batch['attention_mask'].to(self.device) lab = batch['labels'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask, labels=lab) loss", "MiniModel from torch import nn class FastFineTuna: def __init__(self, model_name,", "batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist())", "tokenizer(X_test, truncation=True, padding=True) train_dataset = MainDataset(tokenized_train, y_train) test_dataset = MainDataset(tokenized_test,", "predicted_B def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5): config", "AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)", "original_B.extend(y_B_test) with torch.no_grad(): for batch in loader: input_ids = batch['input_ids'].to(self.device)", "for train_index, test_index in skf.split(texts, labels): model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)", "= MainDataset(tokenized_train, labels) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)", "texts, labels_A, labels_B, splits=5, epochs=5, batch_size=16, learning_rate=5e-5, ): tokenizer =", "input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask)", "learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts", "cross_validate_fit(self, texts, labels, splits=5, epochs=5, batch_size=16, learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name,", "labels_A[test_index].tolist() y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist() # not the smartest", "outputs[0] loss.backward() optim.step() pbar.close() os.makedirs(path) model.save_pretrained(path) tokenizer.save_pretrained(path) class DoubleFastFineTuna: def", "AutoModelForSequenceClassification, 
AutoTokenizer, AutoConfig from sklearn.model_selection import StratifiedKFold import numpy as", "optim.zero_grad() input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab_A = batch['labels_A'].to(self.device)", "texts = np.array(texts) labels_A = np.array(labels_A) labels_B = np.array(labels_B) skf", "DoubleFastFineTuna: def __init__(self, model_name, tokenizer_name): self.model_name = model_name self.tokenizer_name =", "loss = nn.CrossEntropyLoss() loss_A = loss(outputs[0], lab_A) loss_B = loss(outputs[1],", "original_A = [] original_B = [] predicted_A = [] predicted_B", "batch_size=batch_size) original_A.extend(y_A_test) original_B.extend(y_B_test) with torch.no_grad(): for batch in loader: input_ids", "batch['labels_B'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) loss = nn.CrossEntropyLoss() loss_A =", "with torch.no_grad(): for batch in loader: input_ids = batch['input_ids'].to(self.device) attention_mask", "= DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim = AdamW(model.parameters(), lr=learning_rate) pbar =", "del model return original_A, original_B, predicted_A, predicted_B def train_and_save(self, texts,", "for batch in train_loader: optim.zero_grad() input_ids = batch['input_ids'].to(self.device) attention_mask =", "labels) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim =", "loss_B = loss(outputs[1], lab_B) loss = loss_A + loss_B loss.backward()", "= AutoTokenizer.from_pretrained(self.tokenizer_name) tokenized_train = tokenizer(texts, truncation=True, padding=True) train_dataset = MainDataset(tokenized_train,", "train_dataset = MainDataset(tokenized_train, y_train) test_dataset = MainDataset(tokenized_test, y_test) model.to(self.device) model.train()", "epoch in range(epochs): pbar.update(1) for batch in train_loader: optim.zero_grad() input_ids", "epochs=5, batch_size=16, learning_rate=5e-5, ): tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts)", "self.model_name = model_name self.tokenizer_name = tokenizer_name self.device = torch.device('cuda') if", "= AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) tokenizer =", "train_dataset = MainDatasetDouble(tokenized_train, y_A_train, y_B_train) test_dataset = MainDatasetDouble(tokenized_test, y_A_test, y_B_test)", "loss.backward() optim.step() pbar.close() model.eval() loader = DataLoader(test_dataset, batch_size=batch_size) original_A.extend(y_A_test) original_B.extend(y_B_test)", "smartest way to do this, but faster to code up", "config=config) tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) tokenized_train = tokenizer(texts, truncation=True, padding=True) train_dataset", "y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist() y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist() #", "= tokenizer(texts, truncation=True, padding=True) train_dataset = MainDataset(tokenized_train, labels) model.to(self.device) model.train()", "attention_mask = batch['attention_mask'].to(self.device) lab = batch['labels'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask,", "train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name,", "= 
torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') def cross_validate_fit(self, texts, labels_A,", "batch['labels_A'].to(self.device) lab_B = batch['labels_B'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) loss =", "from sklearn.model_selection import StratifiedKFold import numpy as np import torch", "loader: input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) outputs = model(input_ids,", "config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts =", "= torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') def cross_validate_fit(self, texts, labels,", "loss_A + loss_B loss.backward() optim.step() pbar.close() model.eval() loader = DataLoader(test_dataset,", "AdamW(model.parameters(), lr=learning_rate) pbar = tqdm(total=epochs, position=0, leave=True) for epoch in", "num_labels=len(set(labels)), finetuning_task=\"custom\") model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) tokenized_train", "from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig from sklearn.model_selection import", "train_index, test_index in skf.split(texts, labels_A, labels_B): model = MiniModel(self.model_name, len(set(labels_A)),", "lab_B = batch['labels_B'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) loss = nn.CrossEntropyLoss()", "cross_validate_fit(self, texts, labels_A, labels_B, splits=5, epochs=5, batch_size=16, learning_rate=5e-5, ): tokenizer", "batch['attention_mask'].to(self.device) lab_A = batch['labels_A'].to(self.device) lab_B = batch['labels_B'].to(self.device) outputs = model(input_ids,", "): tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels_A = np.array(labels_A)", "nn.CrossEntropyLoss() loss_A = loss(outputs[0], lab_A) loss_B = loss(outputs[1], lab_B) loss", "= loss_A + loss_B loss.backward() optim.step() pbar.close() model.eval() loader =", "learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)", "MainDataset(tokenized_train, y_train) test_dataset = MainDataset(tokenized_test, y_test) model.to(self.device) model.train() train_loader =", "padding=True) tokenized_test = tokenizer(X_test, truncation=True, padding=True) train_dataset = MainDataset(tokenized_train, y_train)", "faster to code up tokenized_train = tokenizer(X_train, truncation=True, padding=True) tokenized_test", "torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') def cross_validate_fit(self, texts, labels, splits=5,", "batch in loader: input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) outputs", "= StratifiedKFold(n_splits=splits) original_A = [] original_B = [] predicted_A =", "DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim = AdamW(model.parameters(), lr=learning_rate) pbar = tqdm(total=epochs,", "StratifiedKFold(n_splits=splits) original_A = [] original_B = [] predicted_A = []", "batch_size=16, learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), 
finetuning_task=\"custom\") model = AutoModelForSequenceClassification.from_pretrained(self.model_name,", "= tokenizer(X_test, truncation=True, padding=True) train_dataset = MainDataset(tokenized_train, y_train) test_dataset =", "[] predicted_B = [] for train_index, test_index in skf.split(texts, labels_A,", "loss = outputs[0] loss.backward() optim.step() pbar.close() model.eval() loader = DataLoader(test_dataset,", "transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig from sklearn.model_selection import StratifiedKFold", "in train_loader: optim.zero_grad() input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab", "if torch.cuda.is_available() else torch.device('cpu') def cross_validate_fit(self, texts, labels_A, labels_B, splits=5,", "= [] predicted_B = [] for train_index, test_index in skf.split(texts,", "AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) tokenized_train = tokenizer(texts, truncation=True, padding=True)", "predicted_B = [] for train_index, test_index in skf.split(texts, labels_A, labels_B):", "torch.cuda.is_available() else torch.device('cpu') def cross_validate_fit(self, texts, labels_A, labels_B, splits=5, epochs=5,", "= outputs[0] loss.backward() optim.step() pbar.close() os.makedirs(path) model.save_pretrained(path) tokenizer.save_pretrained(path) class DoubleFastFineTuna:", "skf.split(texts, labels): model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) X_train, X_test = texts[train_index].tolist(),", "= model(input_ids, attention_mask=attention_mask) predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist()) predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist()) del model return", "np.array(labels_A) labels_B = np.array(labels_B) skf = StratifiedKFold(n_splits=splits) original_A = []", "import tqdm from fast_fine_tuna.models import MiniModel from torch import nn", "config=config) X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_train, y_test = labels[train_index].tolist(),", "return original, predicted def train_and_save(self, texts, labels, path, epochs=5, batch_size=16,", "[] for train_index, test_index in skf.split(texts, labels_A, labels_B): model =", "= tokenizer(X_test, truncation=True, padding=True) train_dataset = MainDatasetDouble(tokenized_train, y_A_train, y_B_train) test_dataset", "batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab_A = batch['labels_A'].to(self.device) lab_B = batch['labels_B'].to(self.device)", "fast_fine_tuna.dataset import MainDatasetDouble, MainDataset from transformers import AdamW from torch.utils.data", "loader = DataLoader(test_dataset, batch_size=batch_size) original_A.extend(y_A_test) original_B.extend(y_B_test) with torch.no_grad(): for batch", "def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5): config =", "MainDatasetDouble(tokenized_test, y_A_test, y_B_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)", "original_B = [] predicted_A = [] predicted_B = [] for", "position=0, leave=True) for epoch in range(epochs): pbar.update(1) for batch in", "torch from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset from transformers import AdamW", "input_ids = 
batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab = batch['labels'].to(self.device) outputs", "padding=True) train_dataset = MainDatasetDouble(tokenized_train, y_A_train, y_B_train) test_dataset = MainDatasetDouble(tokenized_test, y_A_test,", "= [] original_B = [] predicted_A = [] predicted_B =", "from transformers import AdamW from torch.utils.data import DataLoader import os", "class DoubleFastFineTuna: def __init__(self, model_name, tokenizer_name): self.model_name = model_name self.tokenizer_name", "loss_B loss.backward() optim.step() pbar.close() model.eval() loader = DataLoader(test_dataset, batch_size=batch_size) original_A.extend(y_A_test)", "outputs = model(input_ids, attention_mask=attention_mask) predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist()) predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist()) del model", "+ loss_B loss.backward() optim.step() pbar.close() model.eval() loader = DataLoader(test_dataset, batch_size=batch_size)", "up tokenized_train = tokenizer(X_train, truncation=True, padding=True) tokenized_test = tokenizer(X_test, truncation=True,", "attention_mask=attention_mask) predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist()) predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist()) del model return original_A, original_B,", "= labels_B[train_index].tolist(), labels_B[test_index].tolist() # not the smartest way to do", "AutoConfig from sklearn.model_selection import StratifiedKFold import numpy as np import", "def cross_validate_fit(self, texts, labels, splits=5, epochs=5, batch_size=16, learning_rate=5e-5): config =", "predicted_A, predicted_B def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5):", "X_test = texts[train_index].tolist(), texts[test_index].tolist() y_A_train, y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist() y_B_train,", "= texts[train_index].tolist(), texts[test_index].tolist() y_A_train, y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist() y_B_train, y_B_test", "AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels", "texts[train_index].tolist(), texts[test_index].tolist() y_train, y_test = labels[train_index].tolist(), labels[test_index].tolist() # not the", "the smartest way to do this, but faster to code", "optim.step() pbar.close() os.makedirs(path) model.save_pretrained(path) tokenizer.save_pretrained(path) class DoubleFastFineTuna: def __init__(self, model_name,", "model(input_ids, attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist()) del model return original, predicted def", "else torch.device('cpu') def cross_validate_fit(self, texts, labels_A, labels_B, splits=5, epochs=5, batch_size=16,", "= batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"],", "padding=True) train_dataset = MainDataset(tokenized_train, y_train) test_dataset = MainDataset(tokenized_test, y_test) model.to(self.device)", "= AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels_A = np.array(labels_A) labels_B =", "attention_mask=attention_mask) loss = nn.CrossEntropyLoss() loss_A = 
loss(outputs[0], lab_A) loss_B =", "optim.zero_grad() input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab = batch['labels'].to(self.device)", "attention_mask = batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist()) predicted_B.extend(torch.argmax(outputs[1],", "np.array(texts) labels_A = np.array(labels_A) labels_B = np.array(labels_B) skf = StratifiedKFold(n_splits=splits)", "self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') def cross_validate_fit(self, texts,", "import os from tqdm import tqdm from fast_fine_tuna.models import MiniModel", "def cross_validate_fit(self, texts, labels_A, labels_B, splits=5, epochs=5, batch_size=16, learning_rate=5e-5, ):", "X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_A_train, y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist()", "nn class FastFineTuna: def __init__(self, model_name, tokenizer_name): self.model_name = model_name", "num_labels=len(set(labels)), finetuning_task=\"custom\") tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels =", "attention_mask=attention_mask, labels=lab) loss = outputs[0] loss.backward() optim.step() pbar.close() os.makedirs(path) model.save_pretrained(path)", "in train_loader: optim.zero_grad() input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab_A", "outputs = model(input_ids, attention_mask=attention_mask) loss = nn.CrossEntropyLoss() loss_A = loss(outputs[0],", "len(set(labels_A)), len(set(labels_B))) X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_A_train, y_A_test =", "MainDataset(tokenized_train, labels) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim", "path, epochs=5, batch_size=16, learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") model", "StratifiedKFold import numpy as np import torch from fast_fine_tuna.dataset import", "to code up tokenized_train = tokenizer(X_train, truncation=True, padding=True) tokenized_test =", "import StratifiedKFold import numpy as np import torch from fast_fine_tuna.dataset", "tokenized_train = tokenizer(X_train, truncation=True, padding=True) tokenized_test = tokenizer(X_test, truncation=True, padding=True)", "axis=1).cpu().numpy().tolist()) del model return original, predicted def train_and_save(self, texts, labels,", "import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig from sklearn.model_selection import StratifiedKFold import", "loss = loss_A + loss_B loss.backward() optim.step() pbar.close() model.eval() loader", "y_B_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim =", "tokenizer(texts, truncation=True, padding=True) train_dataset = MainDataset(tokenized_train, labels) model.to(self.device) model.train() train_loader", "np.array(texts) labels = np.array(labels) skf = StratifiedKFold(n_splits=splits) original = []", "predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist()) del model return original, predicted def train_and_save(self, texts,", "do this, but faster to code up tokenized_train = tokenizer(X_train,", "skf = 
StratifiedKFold(n_splits=splits) original = [] predicted = [] for", "MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B))) X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_A_train, y_A_test", "test_dataset = MainDatasetDouble(tokenized_test, y_A_test, y_B_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset,", "= DataLoader(test_dataset, batch_size=batch_size) original_A.extend(y_A_test) original_B.extend(y_B_test) with torch.no_grad(): for batch in", "= batch['labels_A'].to(self.device) lab_B = batch['labels_B'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) loss", "del model return original, predicted def train_and_save(self, texts, labels, path,", "original, predicted def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5):", "AutoTokenizer, AutoConfig from sklearn.model_selection import StratifiedKFold import numpy as np", "model(input_ids, attention_mask=attention_mask, labels=lab) loss = outputs[0] loss.backward() optim.step() pbar.close() model.eval()", "batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist()) predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist()) del", "tokenizer_name): self.model_name = model_name self.tokenizer_name = tokenizer_name self.device = torch.device('cuda')", "original = [] predicted = [] for train_index, test_index in", "axis=1).cpu().numpy().tolist()) del model return original_A, original_B, predicted_A, predicted_B def train_and_save(self,", "lab_A = batch['labels_A'].to(self.device) lab_B = batch['labels_B'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask)", "X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_train, y_test = labels[train_index].tolist(), labels[test_index].tolist()", "labels[train_index].tolist(), labels[test_index].tolist() # not the smartest way to do this,", "train_loader: optim.zero_grad() input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab_A =", "= batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab_A = batch['labels_A'].to(self.device) lab_B =", "AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels = np.array(labels) skf = StratifiedKFold(n_splits=splits)", "= nn.CrossEntropyLoss() loss_A = loss(outputs[0], lab_A) loss_B = loss(outputs[1], lab_B)", "predicted def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5): config", "= model_name self.tokenizer_name = tokenizer_name self.device = torch.device('cuda') if torch.cuda.is_available()", "AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels_A = np.array(labels_A) labels_B = np.array(labels_B)", "len(set(labels_B))) X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_A_train, y_A_test = labels_A[train_index].tolist(),", "this, but faster to code up tokenized_train = tokenizer(X_train, truncation=True,", "but faster to code up tokenized_train = tokenizer(X_train, truncation=True, padding=True)", "= np.array(texts) labels = np.array(labels) skf = StratifiedKFold(n_splits=splits) original =", "texts[test_index].tolist() y_A_train, y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist() y_B_train, y_B_test = 
labels_B[train_index].tolist(),", "= outputs[0] loss.backward() optim.step() pbar.close() model.eval() loader = DataLoader(test_dataset, batch_size=batch_size)", "model return original, predicted def train_and_save(self, texts, labels, path, epochs=5,", "batch_size=16, learning_rate=5e-5, ): tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels_A", "model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_train,", "import DataLoader import os from tqdm import tqdm from fast_fine_tuna.models", "original_A.extend(y_A_test) original_B.extend(y_B_test) with torch.no_grad(): for batch in loader: input_ids =", "MainDataset(tokenized_test, y_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim", "predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist()) predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist()) del model return original_A, original_B, predicted_A,", "outputs = model(input_ids, attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist()) del model return original,", "= AdamW(model.parameters(), lr=learning_rate) pbar = tqdm(total=epochs, position=0, leave=True) for epoch", "# not the smartest way to do this, but faster", "model_name self.tokenizer_name = tokenizer_name self.device = torch.device('cuda') if torch.cuda.is_available() else", "os from tqdm import tqdm from fast_fine_tuna.models import MiniModel from", "batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist()) del model return", "import MainDatasetDouble, MainDataset from transformers import AdamW from torch.utils.data import", "[] for train_index, test_index in skf.split(texts, labels): model = AutoModelForSequenceClassification.from_pretrained(self.model_name,", "= batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) predicted_A.extend(torch.argmax(outputs[0],", "= MainDatasetDouble(tokenized_train, y_A_train, y_B_train) test_dataset = MainDatasetDouble(tokenized_test, y_A_test, y_B_test) model.to(self.device)", "= batch['attention_mask'].to(self.device) lab_A = batch['labels_A'].to(self.device) lab_B = batch['labels_B'].to(self.device) outputs =", "= labels[train_index].tolist(), labels[test_index].tolist() # not the smartest way to do", "outputs[0] loss.backward() optim.step() pbar.close() model.eval() loader = DataLoader(test_dataset, batch_size=batch_size) original.extend(y_test)", "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim = AdamW(model.parameters(), lr=learning_rate) pbar", "labels_A, labels_B): model = MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B))) X_train, X_test =", "torch.no_grad(): for batch in loader: input_ids = batch['input_ids'].to(self.device) attention_mask =", "labels_A[train_index].tolist(), labels_A[test_index].tolist() y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist() # not the", "import AdamW from torch.utils.data import DataLoader import os from tqdm", "train_loader: optim.zero_grad() input_ids = batch['input_ids'].to(self.device) attention_mask = 
batch['attention_mask'].to(self.device) lab =", "from fast_fine_tuna.models import MiniModel from torch import nn class FastFineTuna:", "tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels_A = np.array(labels_A) labels_B", "optim.step() pbar.close() model.eval() loader = DataLoader(test_dataset, batch_size=batch_size) original_A.extend(y_A_test) original_B.extend(y_B_test) with", "for epoch in range(epochs): pbar.update(1) for batch in train_loader: optim.zero_grad()", "test_dataset = MainDataset(tokenized_test, y_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size,", "import torch from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset from transformers import", "y_train, y_test = labels[train_index].tolist(), labels[test_index].tolist() # not the smartest way", "labels_B, splits=5, epochs=5, batch_size=16, learning_rate=5e-5, ): tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts", "= batch['labels_B'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) loss = nn.CrossEntropyLoss() loss_A", "= tokenizer_name self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') def", "model = MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B))) X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist()", "y_A_train, y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist() y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist()", "fast_fine_tuna.models import MiniModel from torch import nn class FastFineTuna: def", "finetuning_task=\"custom\") tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels = np.array(labels)", "= [] for train_index, test_index in skf.split(texts, labels): model =", "= DataLoader(test_dataset, batch_size=batch_size) original.extend(y_test) with torch.no_grad(): for batch in loader:", "y_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim =", "lab_B) loss = loss_A + loss_B loss.backward() optim.step() pbar.close() model.eval()", "for batch in loader: input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device)", "def __init__(self, model_name, tokenizer_name): self.model_name = model_name self.tokenizer_name = tokenizer_name", "class FastFineTuna: def __init__(self, model_name, tokenizer_name): self.model_name = model_name self.tokenizer_name", "labels_B): model = MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B))) X_train, X_test = texts[train_index].tolist(),", "for train_index, test_index in skf.split(texts, labels_A, labels_B): model = MiniModel(self.model_name,", "= MainDatasetDouble(tokenized_test, y_A_test, y_B_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size,", "model.eval() loader = DataLoader(test_dataset, batch_size=batch_size) original_A.extend(y_A_test) original_B.extend(y_B_test) with torch.no_grad(): for", "= MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B))) X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist() y_A_train,", "= batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab = batch['labels'].to(self.device) outputs =", "batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab 
= batch['labels'].to(self.device) outputs = model(input_ids,", "[] predicted = [] for train_index, test_index in skf.split(texts, labels):", "__init__(self, model_name, tokenizer_name): self.model_name = model_name self.tokenizer_name = tokenizer_name self.device", "= AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels = np.array(labels) skf =", "batch_size=batch_size) original.extend(y_test) with torch.no_grad(): for batch in loader: input_ids =", "pbar.close() model.eval() loader = DataLoader(test_dataset, batch_size=batch_size) original.extend(y_test) with torch.no_grad(): for", "tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) tokenized_train = tokenizer(texts, truncation=True, padding=True) train_dataset =", "predicted_A = [] predicted_B = [] for train_index, test_index in", "= AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) tokenized_train = tokenizer(texts, truncation=True,", "labels, splits=5, epochs=5, batch_size=16, learning_rate=5e-5): config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\")", "pbar.close() os.makedirs(path) model.save_pretrained(path) tokenizer.save_pretrained(path) class DoubleFastFineTuna: def __init__(self, model_name, tokenizer_name):", "from torch import nn class FastFineTuna: def __init__(self, model_name, tokenizer_name):", "batch_size=batch_size, shuffle=True) optim = AdamW(model.parameters(), lr=learning_rate) pbar = tqdm(total=epochs, position=0,", "skf = StratifiedKFold(n_splits=splits) original_A = [] original_B = [] predicted_A", "y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist() # not the smartest way", "leave=True) for epoch in range(epochs): pbar.update(1) for batch in train_loader:", "= MainDataset(tokenized_train, y_train) test_dataset = MainDataset(tokenized_test, y_test) model.to(self.device) model.train() train_loader", "import nn class FastFineTuna: def __init__(self, model_name, tokenizer_name): self.model_name =", "texts = np.array(texts) labels = np.array(labels) skf = StratifiedKFold(n_splits=splits) original", "y_A_test, y_B_test) model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim", "tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts) labels = np.array(labels) skf", "train_index, test_index in skf.split(texts, labels): model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) X_train,", "input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device) lab_A = batch['labels_A'].to(self.device) lab_B", "model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim = AdamW(model.parameters(), lr=learning_rate)", "= AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)), finetuning_task=\"custom\") tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts = np.array(texts)", "tokenized_test = tokenizer(X_test, truncation=True, padding=True) train_dataset = MainDataset(tokenized_train, y_train) test_dataset", "attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist()) del model return original, predicted def train_and_save(self,", "texts[train_index].tolist(), texts[test_index].tolist() y_A_train, y_A_test = 
labels_A[train_index].tolist(), labels_A[test_index].tolist() y_B_train, y_B_test =", "tokenized_train = tokenizer(texts, truncation=True, padding=True) train_dataset = MainDataset(tokenized_train, labels) model.to(self.device)", "torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') def cross_validate_fit(self, texts, labels_A, labels_B,", "StratifiedKFold(n_splits=splits) original = [] predicted = [] for train_index, test_index", "batch in train_loader: optim.zero_grad() input_ids = batch['input_ids'].to(self.device) attention_mask = batch['attention_mask'].to(self.device)", "model.to(self.device) model.train() train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) optim = AdamW(model.parameters(),", "finetuning_task=\"custom\") model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) tokenized_train =", "np.array(labels_B) skf = StratifiedKFold(n_splits=splits) original_A = [] original_B = []", "model(input_ids, attention_mask=attention_mask) predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist()) predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist()) del model return original_A,", "splits=5, epochs=5, batch_size=16, learning_rate=5e-5, ): tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) texts =", "shuffle=True) optim = AdamW(model.parameters(), lr=learning_rate) pbar = tqdm(total=epochs, position=0, leave=True)", "= np.array(texts) labels_A = np.array(labels_A) labels_B = np.array(labels_B) skf =", "= np.array(labels) skf = StratifiedKFold(n_splits=splits) original = [] predicted =", "tokenizer.save_pretrained(path) class DoubleFastFineTuna: def __init__(self, model_name, tokenizer_name): self.model_name = model_name", "= StratifiedKFold(n_splits=splits) original = [] predicted = [] for train_index,", "= tokenizer(X_train, truncation=True, padding=True) tokenized_test = tokenizer(X_test, truncation=True, padding=True) train_dataset", "predicted = [] for train_index, test_index in skf.split(texts, labels): model", "sklearn.model_selection import StratifiedKFold import numpy as np import torch from", "as np import torch from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset from", "model.eval() loader = DataLoader(test_dataset, batch_size=batch_size) original.extend(y_test) with torch.no_grad(): for batch", "in range(epochs): pbar.update(1) for batch in train_loader: optim.zero_grad() input_ids =", "texts[test_index].tolist() y_train, y_test = labels[train_index].tolist(), labels[test_index].tolist() # not the smartest", "range(epochs): pbar.update(1) for batch in train_loader: optim.zero_grad() input_ids = batch['input_ids'].to(self.device)", "= batch['attention_mask'].to(self.device) outputs = model(input_ids, attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist()) del model", "labels[test_index].tolist() # not the smartest way to do this, but", "in skf.split(texts, labels): model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config) X_train, X_test =", "= model(input_ids, attention_mask=attention_mask) predicted.extend(torch.argmax(outputs[\"logits\"], axis=1).cpu().numpy().tolist()) del model return original, predicted", "not the smartest way to do this, but faster to", "model(input_ids, attention_mask=attention_mask) loss = nn.CrossEntropyLoss() loss_A = loss(outputs[0], lab_A) loss_B", "= 
<reponame>vinid/fast_fine_tuna
from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig
from sklearn.model_selection import StratifiedKFold
import numpy as np
import torch
from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset
from transformers import AdamW
from torch.utils.data import DataLoader
import os
from tqdm import tqdm
from fast_fine_tuna.models import MiniModel
from torch import nn


class FastFineTuna:

    def __init__(self, model_name, tokenizer_name):
        self.model_name = model_name
        self.tokenizer_name = tokenizer_name
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    def cross_validate_fit(self, texts, labels, splits=5, epochs=5, batch_size=16, learning_rate=5e-5):
        config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)),
                                            finetuning_task="custom")
        tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)

        texts = np.array(texts)
        labels = np.array(labels)

        skf = StratifiedKFold(n_splits=splits)
        original = []
        predicted = []

        for train_index, test_index in skf.split(texts, labels):
            model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)
            X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist()
            y_train, y_test = labels[train_index].tolist(), labels[test_index].tolist()

            # not the smartest way to do this, but faster to code up
            tokenized_train = tokenizer(X_train, truncation=True, padding=True)
            tokenized_test = tokenizer(X_test, truncation=True, padding=True)

            train_dataset = MainDataset(tokenized_train, y_train)
            test_dataset = MainDataset(tokenized_test, y_test)

            model.to(self.device)
            model.train()

            train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
            optim = AdamW(model.parameters(), lr=learning_rate)

            pbar = tqdm(total=epochs, position=0, leave=True)
            for epoch in range(epochs):
                pbar.update(1)
                for batch in train_loader:
                    optim.zero_grad()
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    lab = batch['labels'].to(self.device)
                    outputs = model(input_ids, attention_mask=attention_mask, labels=lab)
                    loss = outputs[0]
                    loss.backward()
                    optim.step()
            pbar.close()

            model.eval()
            loader = DataLoader(test_dataset, batch_size=batch_size)
            original.extend(y_test)
            with torch.no_grad():
                for batch in loader:
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    outputs = model(input_ids, attention_mask=attention_mask)
                    predicted.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist())
            del model

        return original, predicted

    def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5):
        config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)),
                                            finetuning_task="custom")
        model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)
        tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)

        tokenized_train = tokenizer(texts, truncation=True, padding=True)
        train_dataset = MainDataset(tokenized_train, labels)

        model.to(self.device)
        model.train()

        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        optim = AdamW(model.parameters(), lr=learning_rate)

        pbar = tqdm(total=epochs, position=0, leave=True)
        for epoch in range(epochs):
            pbar.update(1)
            for batch in train_loader:
                optim.zero_grad()
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                lab = batch['labels'].to(self.device)
                outputs = model(input_ids, attention_mask=attention_mask, labels=lab)
                loss = outputs[0]
                loss.backward()
                optim.step()
        pbar.close()

        os.makedirs(path)
        model.save_pretrained(path)
        tokenizer.save_pretrained(path)


class DoubleFastFineTuna:

    def __init__(self, model_name, tokenizer_name):
        self.model_name = model_name
        self.tokenizer_name = tokenizer_name
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    def cross_validate_fit(self, texts, labels_A, labels_B, splits=5, epochs=5, batch_size=16,
                           learning_rate=5e-5,
                           ):
        tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)

        texts = np.array(texts)
        labels_A = np.array(labels_A)
        labels_B = np.array(labels_B)

        skf = StratifiedKFold(n_splits=splits)
        original_A = []
        original_B = []
        predicted_A = []
        predicted_B = []

        for train_index, test_index in skf.split(texts, labels_A, labels_B):
            model = MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B)))
            X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist()
            y_A_train, y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist()
            y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist()

            # not the smartest way to do this, but faster to code up
            tokenized_train = tokenizer(X_train, truncation=True, padding=True)
            tokenized_test = tokenizer(X_test, truncation=True, padding=True)

            train_dataset = MainDatasetDouble(tokenized_train, y_A_train, y_B_train)
            test_dataset = MainDatasetDouble(tokenized_test, y_A_test, y_B_test)

            model.to(self.device)
            model.train()

            train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
            optim = AdamW(model.parameters(), lr=learning_rate)

            pbar = tqdm(total=epochs, position=0, leave=True)
            for epoch in range(epochs):
                pbar.update(1)
                for batch in train_loader:
                    optim.zero_grad()
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    lab_A = batch['labels_A'].to(self.device)
                    lab_B = batch['labels_B'].to(self.device)
                    outputs = model(input_ids, attention_mask=attention_mask)
                    loss = nn.CrossEntropyLoss()
                    loss_A = loss(outputs[0], lab_A)
                    loss_B = loss(outputs[1], lab_B)
                    loss = loss_A + loss_B
                    loss.backward()
                    optim.step()
            pbar.close()

            model.eval()
            loader = DataLoader(test_dataset, batch_size=batch_size)
            original_A.extend(y_A_test)
            original_B.extend(y_B_test)
            with torch.no_grad():
                for batch in loader:
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    outputs = model(input_ids, attention_mask=attention_mask)
                    predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist())
                    predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist())
            del model

        return original_A, original_B, predicted_A, predicted_B

    def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5):
        ...
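# A minimal usage sketch for the classes reconstructed above, assuming the
# fast_fine_tuna package is installed and exposes them with these signatures;
# the model name, texts and labels below are purely illustrative.
from sklearn.metrics import classification_report

tuna = FastFineTuna("bert-base-uncased", "bert-base-uncased")
example_texts = ["great film", "terrible plot", "loved it", "not my thing"] * 10
example_labels = [1, 0, 1, 0] * 10

# Cross-validated fine-tuning; returns the gold and predicted labels per fold.
gold, pred = tuna.cross_validate_fit(example_texts, example_labels, splits=5, epochs=1)
print(classification_report(gold, pred))

# Fine-tune on all data and persist model + tokenizer (note: os.makedirs(path)
# above raises if the target directory already exists).
tuna.train_and_save(example_texts, example_labels, path="finetuned_model", epochs=1)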
[ "Message class Implementation # @author: <NAME> <<EMAIL>> class Message: class", "Rsponse: def __init__(self): self.status = False self.data = None def", "<NAME> <<EMAIL>> class Message: class Request: def __init__(self, action=\"\", data=None):", "@author: <NAME> <<EMAIL>> class Message: class Request: def __init__(self, action=\"\",", "Request: def __init__(self, action=\"\", data=None): self.action = action self.data =", "None def __init__(self): pass def set_request(self): pass def response(self): pass", "Implementation # @author: <NAME> <<EMAIL>> class Message: class Request: def", "<filename>Message/Message.py # Message class Implementation # @author: <NAME> <<EMAIL>> class", "<<EMAIL>> class Message: class Request: def __init__(self, action=\"\", data=None): self.action", "Message: class Request: def __init__(self, action=\"\", data=None): self.action = action", "class Rsponse: def __init__(self): self.status = False self.data = None", "action self.data = data class Rsponse: def __init__(self): self.status =", "data class Rsponse: def __init__(self): self.status = False self.data =", "data=None): self.action = action self.data = data class Rsponse: def", "self.data = None def __init__(self): pass def set_request(self): pass def", "False self.data = None def __init__(self): pass def set_request(self): pass", "# @author: <NAME> <<EMAIL>> class Message: class Request: def __init__(self,", "self.status = False self.data = None def __init__(self): pass def", "= data class Rsponse: def __init__(self): self.status = False self.data", "class Message: class Request: def __init__(self, action=\"\", data=None): self.action =", "= None def __init__(self): pass def set_request(self): pass def response(self):", "# Message class Implementation # @author: <NAME> <<EMAIL>> class Message:", "__init__(self, action=\"\", data=None): self.action = action self.data = data class", "__init__(self): self.status = False self.data = None def __init__(self): pass", "= False self.data = None def __init__(self): pass def set_request(self):", "action=\"\", data=None): self.action = action self.data = data class Rsponse:", "def __init__(self): self.status = False self.data = None def __init__(self):", "def __init__(self, action=\"\", data=None): self.action = action self.data = data", "class Implementation # @author: <NAME> <<EMAIL>> class Message: class Request:", "class Request: def __init__(self, action=\"\", data=None): self.action = action self.data", "= action self.data = data class Rsponse: def __init__(self): self.status", "self.action = action self.data = data class Rsponse: def __init__(self):", "self.data = data class Rsponse: def __init__(self): self.status = False" ]
[ "time def web_socket_do_extra_handshake(request): pass # Always accept. def web_socket_transfer_data(request): time.sleep(3)", "msgutil import time def web_socket_do_extra_handshake(request): pass # Always accept. def", "def web_socket_do_extra_handshake(request): pass # Always accept. def web_socket_transfer_data(request): time.sleep(3) msgutil.send_message(request,", "from mod_pywebsocket import msgutil import time def web_socket_do_extra_handshake(request): pass #", "import time def web_socket_do_extra_handshake(request): pass # Always accept. def web_socket_transfer_data(request):", "web_socket_do_extra_handshake(request): pass # Always accept. def web_socket_transfer_data(request): time.sleep(3) msgutil.send_message(request, \"line\")", "import msgutil import time def web_socket_do_extra_handshake(request): pass # Always accept.", "#!/usr/bin/python from mod_pywebsocket import msgutil import time def web_socket_do_extra_handshake(request): pass", "mod_pywebsocket import msgutil import time def web_socket_do_extra_handshake(request): pass # Always" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "-> List[str]: with PSRPHook(self.conn_id) as hook: ps = hook.invoke_powershell( f\"cmd.exe", "self, *, psrp_conn_id: str, command: Optional[str] = None, powershell: Optional[str]", "OF ANY # KIND, either express or implied. See the", "from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook if TYPE_CHECKING: from airflow.utils.context import Context", "airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook if TYPE_CHECKING: from airflow.utils.context import Context class", "*, psrp_conn_id: str, command: Optional[str] = None, powershell: Optional[str] =", "Optional[str] = None, **kwargs, ) -> None: super().__init__(**kwargs) if not", "with PSRPHook(self.conn_id) as hook: ps = hook.invoke_powershell( f\"cmd.exe /c @'\\n{self.command}\\n'@\"", "ps = hook.invoke_powershell( f\"cmd.exe /c @'\\n{self.command}\\n'@\" if self.command else self.powershell", "Software Foundation (ASF) under one # or more contributor license", "more contributor license agreements. See the NOTICE file # distributed", "Unless required by applicable law or agreed to in writing,", "on remote host. (templated) :type command: str :param powershell: powershell", "not (command or powershell): raise ValueError(\"Must provide either 'command' or", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "airflow.exceptions import AirflowException from airflow.models import BaseOperator from airflow.providers.microsoft.psrp.hooks.psrp import", "# regarding copyright ownership. The ASF licenses this file #", "f\"cmd.exe /c @'\\n{self.command}\\n'@\" if self.command else self.powershell ) if ps.had_errors:", "Apache Software Foundation (ASF) under one # or more contributor", "PSRPOperator(BaseOperator): \"\"\"PowerShell Remoting Protocol operator. :param psrp_conn_id: connection id :type", "the License. from typing import TYPE_CHECKING, List, Optional, Sequence from", "in compliance # with the License. You may obtain a", "# to you under the Apache License, Version 2.0 (the", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "License for the # specific language governing permissions and limitations", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "host. (templated) :type powershell: str \"\"\" template_fields: Sequence[str] = (", "with this work for additional information # regarding copyright ownership.", "command: str :param powershell: powershell to execute on remote host.", "(ASF) under one # or more contributor license agreements. See", "2.0 (the # \"License\"); you may not use this file", "OR CONDITIONS OF ANY # KIND, either express or implied.", "typing import TYPE_CHECKING, List, Optional, Sequence from airflow.exceptions import AirflowException", "'command' or 'powershell'\") self.conn_id = psrp_conn_id self.command = command self.powershell", "# or more contributor license agreements. 
See the NOTICE file", "agreed to in writing, # software distributed under the License", "{\"command\": \"powershell\", \"powershell\": \"powershell\"} ui_color = \"#901dd2\" def __init__( self,", "from airflow.utils.context import Context class PSRPOperator(BaseOperator): \"\"\"PowerShell Remoting Protocol operator.", "psrp_conn_id self.command = command self.powershell = powershell def execute(self, context:", "def execute(self, context: \"Context\") -> List[str]: with PSRPHook(self.conn_id) as hook:", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "import Context class PSRPOperator(BaseOperator): \"\"\"PowerShell Remoting Protocol operator. :param psrp_conn_id:", "str :param powershell: powershell to execute on remote host. (templated)", "= ( \"command\", \"powershell\", ) template_fields_renderers = {\"command\": \"powershell\", \"powershell\":", "hook: ps = hook.invoke_powershell( f\"cmd.exe /c @'\\n{self.command}\\n'@\" if self.command else", "work for additional information # regarding copyright ownership. The ASF", "if TYPE_CHECKING: from airflow.utils.context import Context class PSRPOperator(BaseOperator): \"\"\"PowerShell Remoting", "specific language governing permissions and limitations # under the License.", "under the License is distributed on an # \"AS IS\"", "this file # to you under the Apache License, Version", "distributed under the License is distributed on an # \"AS", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "copyright ownership. The ASF licenses this file # to you", "powershell: str \"\"\" template_fields: Sequence[str] = ( \"command\", \"powershell\", )", "# software distributed under the License is distributed on an", "(templated) :type powershell: str \"\"\" template_fields: Sequence[str] = ( \"command\",", "template_fields_renderers = {\"command\": \"powershell\", \"powershell\": \"powershell\"} ui_color = \"#901dd2\" def", "= None, **kwargs, ) -> None: super().__init__(**kwargs) if not (command", "powershell def execute(self, context: \"Context\") -> List[str]: with PSRPHook(self.conn_id) as", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "= psrp_conn_id self.command = command self.powershell = powershell def execute(self,", "PSRPHook if TYPE_CHECKING: from airflow.utils.context import Context class PSRPOperator(BaseOperator): \"\"\"PowerShell", ":type powershell: str \"\"\" template_fields: Sequence[str] = ( \"command\", \"powershell\",", "(the # \"License\"); you may not use this file except", "the License. You may obtain a copy of the License", "ValueError(\"Must provide either 'command' or 'powershell'\") self.conn_id = psrp_conn_id self.command", "in writing, # software distributed under the License is distributed", "powershell to execute on remote host. (templated) :type powershell: str", "def __init__( self, *, psrp_conn_id: str, command: Optional[str] = None,", "\"powershell\", ) template_fields_renderers = {\"command\": \"powershell\", \"powershell\": \"powershell\"} ui_color =", ") -> None: super().__init__(**kwargs) if not (command or powershell): raise", "distributed with this work for additional information # regarding copyright", "to execute on remote host. 
(templated) :type powershell: str \"\"\"", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "List, Optional, Sequence from airflow.exceptions import AirflowException from airflow.models import", "License is distributed on an # \"AS IS\" BASIS, WITHOUT", "ASF licenses this file # to you under the Apache", "under the Apache License, Version 2.0 (the # \"License\"); you", "for the # specific language governing permissions and limitations #", "limitations # under the License. from typing import TYPE_CHECKING, List,", "Optional, Sequence from airflow.exceptions import AirflowException from airflow.models import BaseOperator", "airflow.models import BaseOperator from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook if TYPE_CHECKING: from", "or 'powershell'\") self.conn_id = psrp_conn_id self.command = command self.powershell =", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "regarding copyright ownership. The ASF licenses this file # to", "= \"#901dd2\" def __init__( self, *, psrp_conn_id: str, command: Optional[str]", "See the License for the # specific language governing permissions", "to in writing, # software distributed under the License is", "or agreed to in writing, # software distributed under the", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "airflow.utils.context import Context class PSRPOperator(BaseOperator): \"\"\"PowerShell Remoting Protocol operator. :param", "# under the License. from typing import TYPE_CHECKING, List, Optional,", "ui_color = \"#901dd2\" def __init__( self, *, psrp_conn_id: str, command:", "None: super().__init__(**kwargs) if not (command or powershell): raise ValueError(\"Must provide", "psrp_conn_id: connection id :type psrp_conn_id: str :param command: command to", "ownership. The ASF licenses this file # to you under", "= None, powershell: Optional[str] = None, **kwargs, ) -> None:", "and limitations # under the License. from typing import TYPE_CHECKING,", "# \"License\"); you may not use this file except in", "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "\"powershell\", \"powershell\": \"powershell\"} ui_color = \"#901dd2\" def __init__( self, *,", "to the Apache Software Foundation (ASF) under one # or", "import PSRPHook if TYPE_CHECKING: from airflow.utils.context import Context class PSRPOperator(BaseOperator):", "either 'command' or 'powershell'\") self.conn_id = psrp_conn_id self.command = command", "\"License\"); you may not use this file except in compliance", "file # distributed with this work for additional information #", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "class PSRPOperator(BaseOperator): \"\"\"PowerShell Remoting Protocol operator. :param psrp_conn_id: connection id", "execute on remote host. (templated) :type powershell: str \"\"\" template_fields:", "with the License. You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "\"powershell\": \"powershell\"} ui_color = \"#901dd2\" def __init__( self, *, psrp_conn_id:", "self.command = command self.powershell = powershell def execute(self, context: \"Context\")", "\"\"\" template_fields: Sequence[str] = ( \"command\", \"powershell\", ) template_fields_renderers =", "or more contributor license agreements. 
See the NOTICE file #", "(templated) :type command: str :param powershell: powershell to execute on", "as hook: ps = hook.invoke_powershell( f\"cmd.exe /c @'\\n{self.command}\\n'@\" if self.command", "powershell: powershell to execute on remote host. (templated) :type powershell:", "remote host. (templated) :type powershell: str \"\"\" template_fields: Sequence[str] =", "from airflow.models import BaseOperator from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook if TYPE_CHECKING:", "None, **kwargs, ) -> None: super().__init__(**kwargs) if not (command or", "applicable law or agreed to in writing, # software distributed", "# distributed with this work for additional information # regarding", "this work for additional information # regarding copyright ownership. The", "/c @'\\n{self.command}\\n'@\" if self.command else self.powershell ) if ps.had_errors: raise", "command: command to execute on remote host. (templated) :type command:", "writing, # software distributed under the License is distributed on", "the NOTICE file # distributed with this work for additional", "\"\"\"PowerShell Remoting Protocol operator. :param psrp_conn_id: connection id :type psrp_conn_id:", "execute on remote host. (templated) :type command: str :param powershell:", "else self.powershell ) if ps.had_errors: raise AirflowException(\"Process failed\") return ps.output", "language governing permissions and limitations # under the License. from", "\"powershell\"} ui_color = \"#901dd2\" def __init__( self, *, psrp_conn_id: str,", "self.powershell = powershell def execute(self, context: \"Context\") -> List[str]: with", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "import BaseOperator from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook if TYPE_CHECKING: from airflow.utils.context", "implied. See the License for the # specific language governing", "file # to you under the Apache License, Version 2.0", "Sequence[str] = ( \"command\", \"powershell\", ) template_fields_renderers = {\"command\": \"powershell\",", "\"command\", \"powershell\", ) template_fields_renderers = {\"command\": \"powershell\", \"powershell\": \"powershell\"} ui_color", "to you under the Apache License, Version 2.0 (the #", "CONDITIONS OF ANY # KIND, either express or implied. See", "# with the License. You may obtain a copy of", "self.conn_id = psrp_conn_id self.command = command self.powershell = powershell def", "None, powershell: Optional[str] = None, **kwargs, ) -> None: super().__init__(**kwargs)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "context: \"Context\") -> List[str]: with PSRPHook(self.conn_id) as hook: ps =", "host. (templated) :type command: str :param powershell: powershell to execute", "super().__init__(**kwargs) if not (command or powershell): raise ValueError(\"Must provide either", "may not use this file except in compliance # with", "under the License. from typing import TYPE_CHECKING, List, Optional, Sequence", "from airflow.exceptions import AirflowException from airflow.models import BaseOperator from airflow.providers.microsoft.psrp.hooks.psrp", "permissions and limitations # under the License. from typing import", "software distributed under the License is distributed on an #", "Licensed to the Apache Software Foundation (ASF) under one #", "remote host. (templated) :type command: str :param powershell: powershell to", "for additional information # regarding copyright ownership. 
The ASF licenses", "the Apache Software Foundation (ASF) under one # or more", "# # Unless required by applicable law or agreed to", "Version 2.0 (the # \"License\"); you may not use this", "under one # or more contributor license agreements. See the", "command: Optional[str] = None, powershell: Optional[str] = None, **kwargs, )", "psrp_conn_id: str, command: Optional[str] = None, powershell: Optional[str] = None,", "__init__( self, *, psrp_conn_id: str, command: Optional[str] = None, powershell:", "one # or more contributor license agreements. See the NOTICE", "(command or powershell): raise ValueError(\"Must provide either 'command' or 'powershell'\")", "License, Version 2.0 (the # \"License\"); you may not use", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "either express or implied. See the License for the #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", ") template_fields_renderers = {\"command\": \"powershell\", \"powershell\": \"powershell\"} ui_color = \"#901dd2\"", "str, command: Optional[str] = None, powershell: Optional[str] = None, **kwargs,", "= command self.powershell = powershell def execute(self, context: \"Context\") ->", "KIND, either express or implied. See the License for the", "information # regarding copyright ownership. The ASF licenses this file", "the Apache License, Version 2.0 (the # \"License\"); you may", "Remoting Protocol operator. :param psrp_conn_id: connection id :type psrp_conn_id: str", "import TYPE_CHECKING, List, Optional, Sequence from airflow.exceptions import AirflowException from", "id :type psrp_conn_id: str :param command: command to execute on", "except in compliance # with the License. You may obtain", "additional information # regarding copyright ownership. The ASF licenses this", "@'\\n{self.command}\\n'@\" if self.command else self.powershell ) if ps.had_errors: raise AirflowException(\"Process", "TYPE_CHECKING: from airflow.utils.context import Context class PSRPOperator(BaseOperator): \"\"\"PowerShell Remoting Protocol", "**kwargs, ) -> None: super().__init__(**kwargs) if not (command or powershell):", "connection id :type psrp_conn_id: str :param command: command to execute", "you under the Apache License, Version 2.0 (the # \"License\");", "or implied. See the License for the # specific language", "See the NOTICE file # distributed with this work for", "# KIND, either express or implied. See the License for", "express or implied. See the License for the # specific", "Optional[str] = None, powershell: Optional[str] = None, **kwargs, ) ->", "command self.powershell = powershell def execute(self, context: \"Context\") -> List[str]:", "NOTICE file # distributed with this work for additional information", "governing permissions and limitations # under the License. from typing", "if not (command or powershell): raise ValueError(\"Must provide either 'command'", "= powershell def execute(self, context: \"Context\") -> List[str]: with PSRPHook(self.conn_id)", "AirflowException from airflow.models import BaseOperator from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook if", "this file except in compliance # with the License. 
You", "psrp_conn_id: str :param command: command to execute on remote host.", "= {\"command\": \"powershell\", \"powershell\": \"powershell\"} ui_color = \"#901dd2\" def __init__(", ":type command: str :param powershell: powershell to execute on remote", "hook.invoke_powershell( f\"cmd.exe /c @'\\n{self.command}\\n'@\" if self.command else self.powershell ) if", "agreements. See the NOTICE file # distributed with this work", "Apache License, Version 2.0 (the # \"License\"); you may not", ":type psrp_conn_id: str :param command: command to execute on remote", "( \"command\", \"powershell\", ) template_fields_renderers = {\"command\": \"powershell\", \"powershell\": \"powershell\"}", "List[str]: with PSRPHook(self.conn_id) as hook: ps = hook.invoke_powershell( f\"cmd.exe /c", "the # specific language governing permissions and limitations # under", "licenses this file # to you under the Apache License,", "Context class PSRPOperator(BaseOperator): \"\"\"PowerShell Remoting Protocol operator. :param psrp_conn_id: connection", "license agreements. See the NOTICE file # distributed with this", "template_fields: Sequence[str] = ( \"command\", \"powershell\", ) template_fields_renderers = {\"command\":", "TYPE_CHECKING, List, Optional, Sequence from airflow.exceptions import AirflowException from airflow.models", "powershell): raise ValueError(\"Must provide either 'command' or 'powershell'\") self.conn_id =", "= hook.invoke_powershell( f\"cmd.exe /c @'\\n{self.command}\\n'@\" if self.command else self.powershell )", "required by applicable law or agreed to in writing, #", "by applicable law or agreed to in writing, # software", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "command to execute on remote host. (templated) :type command: str", "provide either 'command' or 'powershell'\") self.conn_id = psrp_conn_id self.command =", "PSRPHook(self.conn_id) as hook: ps = hook.invoke_powershell( f\"cmd.exe /c @'\\n{self.command}\\n'@\" if", "Sequence from airflow.exceptions import AirflowException from airflow.models import BaseOperator from", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "BaseOperator from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook if TYPE_CHECKING: from airflow.utils.context import", "The ASF licenses this file # to you under the", ":param psrp_conn_id: connection id :type psrp_conn_id: str :param command: command", ":param powershell: powershell to execute on remote host. (templated) :type", "file except in compliance # with the License. You may", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "# specific language governing permissions and limitations # under the", "Protocol operator. :param psrp_conn_id: connection id :type psrp_conn_id: str :param", "self.command else self.powershell ) if ps.had_errors: raise AirflowException(\"Process failed\") return", "the License for the # specific language governing permissions and", "\"#901dd2\" def __init__( self, *, psrp_conn_id: str, command: Optional[str] =", "powershell: Optional[str] = None, **kwargs, ) -> None: super().__init__(**kwargs) if", ":param command: command to execute on remote host. (templated) :type", "License. You may obtain a copy of the License at", "-> None: super().__init__(**kwargs) if not (command or powershell): raise ValueError(\"Must", "You may obtain a copy of the License at #", "ANY # KIND, either express or implied. 
See the License", "or powershell): raise ValueError(\"Must provide either 'command' or 'powershell'\") self.conn_id", "# Licensed to the Apache Software Foundation (ASF) under one", "the License is distributed on an # \"AS IS\" BASIS,", "from typing import TYPE_CHECKING, List, Optional, Sequence from airflow.exceptions import", "you may not use this file except in compliance #", "execute(self, context: \"Context\") -> List[str]: with PSRPHook(self.conn_id) as hook: ps", "operator. :param psrp_conn_id: connection id :type psrp_conn_id: str :param command:", "\"Context\") -> List[str]: with PSRPHook(self.conn_id) as hook: ps = hook.invoke_powershell(", "'powershell'\") self.conn_id = psrp_conn_id self.command = command self.powershell = powershell", "use this file except in compliance # with the License.", "compliance # with the License. You may obtain a copy", "# # Licensed to the Apache Software Foundation (ASF) under", "to execute on remote host. (templated) :type command: str :param", "raise ValueError(\"Must provide either 'command' or 'powershell'\") self.conn_id = psrp_conn_id", "law or agreed to in writing, # software distributed under", "str :param command: command to execute on remote host. (templated)", "contributor license agreements. See the NOTICE file # distributed with", "on remote host. (templated) :type powershell: str \"\"\" template_fields: Sequence[str]", "if self.command else self.powershell ) if ps.had_errors: raise AirflowException(\"Process failed\")", "Foundation (ASF) under one # or more contributor license agreements.", "str \"\"\" template_fields: Sequence[str] = ( \"command\", \"powershell\", ) template_fields_renderers", "import AirflowException from airflow.models import BaseOperator from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook", "not use this file except in compliance # with the", "License. from typing import TYPE_CHECKING, List, Optional, Sequence from airflow.exceptions" ]
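# A minimal DAG sketch using PSRPOperator as defined above. The dag id,
# connection id and PowerShell snippet are illustrative, and it assumes the
# apache-airflow-providers-microsoft-psrp package plus a configured PSRP
# connection are available.
from datetime import datetime

from airflow import DAG

with DAG(
    dag_id="psrp_example",
    start_date=datetime(2021, 1, 1),
    schedule_interval=None,
    catchup=False,
) as dag:
    list_temp = PSRPOperator(
        task_id="list_temp",
        psrp_conn_id="my_psrp_connection",
        powershell="Get-ChildItem $env:TEMP | Select-Object -First 5",
    )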
[ "of component names to ids.\"\"\" component_names_lower = [name.lower() for name", "blocking=convert_issue_ids(issue.blocking_iids, mar, services), canComment=permissions.CanCommentIssue( mar.auth.effective_ids, mar.perms, issue_project, issue, granted_perms=granted_perms), canEdit=permissions.CanEditIssue(", "tracker_pb2.FieldTypes.STR_TYPE: new_fv.str_value = fv.fieldValue elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE: new_fv.int_value =", "api_pb2_v1.ProjectIssueConfig( kind='monorail#projectIssueConfig', restrictToKnown=config.restrict_to_known, defaultColumns=config.default_col_spec.split(), defaultSorting=config.default_sort_spec.split(), statuses=[convert_status(s) for s in config.well_known_statuses],", "project: project_id = project.project_id try: issue = services.issue.GetIssueByLocalID( mar.cnxn, project_id,", "cd in config.component_defs: cid = cd.component_id if cid in issue.component_ids:", "logging import time from framework import framework_constants from framework import", "= api_pb2_v1.FieldValue( fieldName=field_name, fieldValue=val, derived=fv.derived) field_values_list.append(new_fv) resp = cls( kind='monorail#issue',", "result def convert_field_values(field_values, mar, services): \"\"\"Convert user passed in field", "API ProjectWrapper PB.\"\"\" return api_pb2_v1.ProjectWrapper( kind='monorail#project', name=project.project_name, externalId=project.project_name, htmlLink='/p/%s/' %", "return None try: user = services.user.GetUser(cnxn, user_id) except user_svc.NoSuchUserException as", "global issue ids to API IssueRef PB.\"\"\" # missed issue", "== tracker_pb2.FieldID.CC: for user_id in amendment.added_user_ids: user_email = _get_user_email( services.user,", "if issueref_pbs: result = [] for ir in issueref_pbs: project_id", "add and items to remove.\"\"\" list_to_add = [] list_to_remove =", "services.project.GetProject(mar.cnxn, issue.project_id) component_list = [] for cd in config.component_defs: cid", "project_name = pair_ary[0] local_id = int(pair_ary[1]) project = services.project.GetProjectByName(mar.cnxn, project_name)", "= pair.split(':') project_name = pair_ary[0] local_id = int(pair_ary[1]) project =", "be found in the LICENSE file or at # https://developers.google.com/open-source/licenses/bsd", "elif amendment.field == tracker_pb2.FieldID.CUSTOM: fv = api_pb2_v1.FieldValue() fv.fieldName = amendment.custom_field_name", "split_remove_add(item_list): \"\"\"Split one list of items into two: items to", "filtered out. 
issues = services.issue.GetIssues(mar.cnxn, issue_ids) result = [] for", "datetime import logging import time from framework import framework_constants from", "cnxn, user_id): \"\"\"Get user email.\"\"\" try: user_email = user_service.LookupUserEmail( cnxn,", "services, trap_exception=False): \"\"\"Convert user id to API AtomPerson PB.\"\"\" if", "if fv.user_id: val = _get_user_email( services.user, mar.cnxn, fv.user_id) elif fv.str_value:", "return resp def convert_comment(issue, comment, mar, services, granted_perms): \"\"\"Convert Monorail", "result = [] for pair in project_local_id_pairs: issue_project_id = None", "IssuesGetInsertResponse.\"\"\" config = services.config.GetProjectConfig(mar.cnxn, issue.project_id) granted_perms = tracker_bizobj.GetGrantedPerms( issue, mar.auth.effective_ids,", "govered by a BSD-style # license that can be found", "PB to API IssueCommentWrapper.\"\"\" can_delete = permissions.CanDelete( mar.auth.user_id, mar.auth.effective_ids, mar.perms,", "Label PB.\"\"\" return api_pb2_v1.Label( label=label.label, description=label.label_docstring) def convert_template(template): \"\"\"Convert Monorail", "issue.issue_id, mar.auth.user_id), status=issue.status, state=(api_pb2_v1.IssueState.open if tracker_helpers.MeansOpenInProject( tracker_bizobj.GetStatus(issue), config) else api_pb2_v1.IssueState.closed),", "mar, services): \"\"\"Convert Monorail Issue PB to API IssuesGetInsertResponse.\"\"\" config", "for uid in cd.admin_ids]), cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]), deprecated=cd.deprecated)", "ir in issueref_pbs: project_id = mar.project_id if ir.projectId: project =", "= [] fv_list_clear = [] label_list_add = [] label_list_remove =", "= amendment.newvalue elif amendment.field == tracker_pb2.FieldID.OWNER: if len(amendment.added_user_ids) == 0:", "ids.\"\"\" if issueref_pbs: result = [] for ir in issueref_pbs:", "PB to API ProjectWrapper PB.\"\"\" return api_pb2_v1.ProjectWrapper( kind='monorail#project', name=project.project_name, externalId=project.project_name,", "mar.project_id if ir.projectId: project = services.project.GetProjectByName( mar.cnxn, ir.projectId) if project:", "config.component_defs: cpath = cd.path if cpath.lower() in component_names_lower: result.append(cd.component_id) return", "except user_svc.NoSuchUserException as ex: if trap_exception: logging.warning(str(ex)) return None else:", "= [] id_list = issue_ids.split() for id_str in id_list: if", "user_id): \"\"\"Get user email.\"\"\" try: user_email = user_service.LookupUserEmail( cnxn, user_id)", "= int(pair) result.append( services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id)) return result def convert_group_settings(group_name,", "API Status PB.\"\"\" return api_pb2_v1.Status( status=status.status, meansOpen=status.means_open, description=status.status_docstring) def convert_label(label):", "granted_perms=granted_perms), canEdit=permissions.CanEditIssue( mar.auth.effective_ids, mar.perms, issue_project, issue, granted_perms=granted_perms), fieldValues=field_values_list) if issue.closed_timestamp", "api_pb2_v1.Update(kind='monorail#issueCommentUpdate') for amendment in amendments: if amendment.field == tracker_pb2.FieldID.SUMMARY: result.summary", "user_email = _get_user_email( services.user, mar.cnxn, user_id) result.cc.append(user_email) for user_id in", "name in component_names] result = [] for cd in config.component_defs:", "component.modifier = user_names_dict[cd.modifier_id] return component def convert_component_ids(config, component_names): 
\"\"\"Convert a", "= [] for item in item_list: if item.startswith('-'): list_to_remove.append(item[1:]) else:", "fieldValues=field_values_list) if issue.closed_timestamp > 0: resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp) if issue.merged_into:", "tracker import tracker_bizobj from tracker import tracker_helpers def convert_project(project, config,", "% (framework_helpers.GetHostPort(), user_id), last_visit_days_ago=days_ago, email_bouncing=bool(user.email_bounce_timestamp), vacation_message=user.vacation_message) def convert_issue_ids(issue_ids, mar, services):", "issues = services.issue.GetIssues(mar.cnxn, issue_ids) result = [] for issue in", "field_id_dict.get(fv.field_id) if not field_name: logging.warning('Custom field %d of project %s", "id_str.startswith('-'): result.append('-%s:%s' % (project_name, id_str[1:])) else: result.append('%s:%s' % (project_name, id_str))", "local_id)) return result def convert_group_settings(group_name, setting): \"\"\"Convert UserGroupSettings to UserGroupSettingsWrapper.\"\"\"", "result.append(cd.component_id) return result def convert_field_values(field_values, mar, services): \"\"\"Convert user passed", "cnxn, services, trap_exception=False): \"\"\"Convert user id to API AtomPerson PB.\"\"\"", "component.modified = datetime.datetime.fromtimestamp(cd.modified) component.modifier = user_names_dict[cd.modifier_id] return component def convert_component_ids(config,", "':' in pair: pair_ary = pair.split(':') project_name = pair_ary[0] local_id", "convert_issue(cls, issue, mar, services): \"\"\"Convert Monorail Issue PB to API", "pairs.\"\"\" result = [] for pair in project_local_id_pairs: issue_project_id =", "labels.\"\"\" fv_list_add = [] fv_list_remove = [] fv_list_clear = []", "label_list_add.append(raw_val) else: logging.warning('Unsupported field value operater %s', fv.operator) else: new_fv", "in issue.cc_ids] cc_list = [p for p in cc_list if", "resp def convert_comment(issue, comment, mar, services, granted_perms): \"\"\"Convert Monorail IssueComment", "convert_project(project, config, role): \"\"\"Convert Monorail Project PB to API ProjectWrapper", "in the LICENSE file or at # https://developers.google.com/open-source/licenses/bsd \"\"\"Convert Monorail", "= tracker_bizobj.GetGrantedPerms( issue, mar.auth.effective_ids, config) issue_project = services.project.GetProject(mar.cnxn, issue.project_id) component_list", "pair_ary = pair.split(':') project_name = pair_ary[0] local_id = int(pair_ary[1]) project", "= services.project.GetProjectByName( mar.cnxn, ir.projectId) if project: project_id = project.project_id try:", "if ':' in pair: pair_ary = pair.split(':') project_name = pair_ary[0]", "return None def convert_issue(cls, issue, mar, services): \"\"\"Convert Monorail Issue", "state=(api_pb2_v1.IssueState.open if tracker_helpers.MeansOpenInProject( tracker_bizobj.GetStatus(issue), config) else api_pb2_v1.IssueState.closed), labels=issue.labels, components=component_list, author=convert_person(issue.reporter_id,", "item in item_list: if item.startswith('-'): list_to_remove.append(item[1:]) else: list_to_add.append(item) return list_to_add,", "logging.warning( 'Unsupported field value type %s', field_def.field_type) if fv.operator ==", "id_list = issue_ids.split() for id_str in id_list: if ':' in", "does not exist.' 
% (ir.projectId, ir.issueId)) return result else: return", "project_id local_id = int(pair) result.append( services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id)) return result", "PB.\"\"\" return api_pb2_v1.ProjectIssueConfig( kind='monorail#projectIssueConfig', restrictToKnown=config.restrict_to_known, defaultColumns=config.default_col_spec.split(), defaultSorting=config.default_sort_spec.split(), statuses=[convert_status(s) for s", "file or at # https://developers.google.com/open-source/licenses/bsd \"\"\"Convert Monorail PB objects to", "convert_issueref_pbs(issueref_pbs, mar, services): \"\"\"Convert API IssueRef PBs to global issue", "Status PB.\"\"\" return api_pb2_v1.Status( status=status.status, meansOpen=status.means_open, description=status.status_docstring) def convert_label(label): \"\"\"Convert", "% (project_name, id_str)) return result def split_remove_add(item_list): \"\"\"Split one list", "\"\"\"Convert UserGroupSettings to UserGroupSettingsWrapper.\"\"\" return api_pb2_v1.UserGroupSettingsWrapper( groupName=group_name, who_can_view_members=setting.who_can_view_members, ext_group_type=setting.ext_group_type, last_sync_time=setting.last_sync_time)", "a in comment.attachments], author=convert_person(comment.user_id, mar.cnxn, services, trap_exception=True), canDelete=can_delete, content=comment.content, deletedBy=convert_person(comment.deleted_by,", "PB.\"\"\" return api_pb2_v1.ProjectWrapper( kind='monorail#project', name=project.project_name, externalId=project.project_name, htmlLink='/p/%s/' % project.project_name, summary=project.summary,", "description=project.description, role=role, issuesConfig=convert_project_config(config)) def convert_project_config(config): \"\"\"Convert Monorail ProjectIssueConfig PB to", "user_svc.NoSuchUserException as ex: if trap_exception: logging.warning(str(ex)) return None else: raise", "cd.component_id if cid in issue.component_ids: component_list.append(cd.path) cc_list = [convert_person(p, mar.cnxn,", "result.append(issue.issue_id) except issue_svc.NoSuchIssueException: logging.warning( 'Issue (%s:%d) does not exist.' %", "fv.user_id: val = _get_user_email( services.user, mar.cnxn, fv.user_id) elif fv.str_value: val", "published=datetime.datetime.fromtimestamp(issue.opened_timestamp), blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services), blocking=convert_issue_ids(issue.blocking_iids, mar, services), canComment=permissions.CanCommentIssue( mar.auth.effective_ids, mar.perms,", "ids to API IssueRef PB.\"\"\" # missed issue ids are", "<id> to <project>:<id> format.\"\"\" result = [] id_list = issue_ids.split()", "to global issue ids.\"\"\" if issueref_pbs: result = [] for", "IssueRef PB.\"\"\" # missed issue ids are filtered out. issues", "not exist' % project_name) issue_project_id = project.project_id else: issue_project_id =", "from tracker import tracker_bizobj from tracker import tracker_helpers def convert_project(project,", "= amendment.newvalue.split() elif amendment.field == tracker_pb2.FieldID.CUSTOM: fv = api_pb2_v1.FieldValue() fv.fieldName", "out. 
issues = services.issue.GetIssues(mar.cnxn, issue_ids) result = [] for issue", "raise project_svc.NoSuchProjectException( 'Project %s does not exist' % project_name) issue_project_id", "issue.owner_modified_timestamp) if issue.status_modified_timestamp: resp.status_modified = datetime.datetime.fromtimestamp( issue.status_modified_timestamp) if issue.component_modified_timestamp: resp.component_modified", "amendment.field == tracker_pb2.FieldID.CC: for user_id in amendment.added_user_ids: user_email = _get_user_email(", "= services.project.GetProjectByName(mar.cnxn, project_name) if not project: raise project_svc.NoSuchProjectException( 'Project %s", "description=label.label_docstring) def convert_template(template): \"\"\"Convert Monorail TemplateDef PB to API Prompt", "for uid in cd.cc_ids]), deprecated=cd.deprecated) if cd.created: component.created = datetime.datetime.fromtimestamp(cd.created)", "ir.projectId) if project: project_id = project.project_id try: issue = services.issue.GetIssueByLocalID(", "import issue_svc from services import project_svc from services import user_svc", "defaultPromptForMembers=config.default_template_for_developers, defaultPromptForNonMembers=config.default_template_for_users) def convert_status(status): \"\"\"Convert Monorail StatusDef PB to API", "mar, services): \"\"\"Convert global issue ids to API IssueRef PB.\"\"\"", "services.user.GetUser(cnxn, user_id) except user_svc.NoSuchUserException as ex: if trap_exception: logging.warning(str(ex)) return", "in amendments: if amendment.field == tracker_pb2.FieldID.SUMMARY: result.summary = amendment.newvalue elif", "def convert_group_settings(group_name, setting): \"\"\"Convert UserGroupSettings to UserGroupSettingsWrapper.\"\"\" return api_pb2_v1.UserGroupSettingsWrapper( groupName=group_name,", "field value operater %s', fv.operator) return (fv_list_add, fv_list_remove, fv_list_clear, label_list_add,", "%s does not exist', fv.field_id, issue_project.project_name) continue val = None", "API IssueCommentWrapper.\"\"\" can_delete = permissions.CanDelete( mar.auth.user_id, mar.auth.effective_ids, mar.perms, comment.deleted_by, comment.user_id,", "ProjectWrapper PB.\"\"\" return api_pb2_v1.ProjectWrapper( kind='monorail#project', name=project.project_name, externalId=project.project_name, htmlLink='/p/%s/' % project.project_name,", "\"\"\"Convert Monorail TemplateDef PB to API Prompt PB.\"\"\" return api_pb2_v1.Prompt(", "content=comment.content, deletedBy=convert_person(comment.deleted_by, mar.cnxn, services, trap_exception=True), id=comment.sequence, published=datetime.datetime.fromtimestamp(comment.timestamp), updates=convert_amendments(issue, comment.amendments, mar,", "exist', fv.field_id, issue_project.project_name) continue val = None if fv.user_id: val", "api_pb2_v1.FieldValueOperator.add: label_list_add.append(raw_val) else: logging.warning('Unsupported field value operater %s', fv.operator) else:", "cd.modified: component.modified = datetime.datetime.fromtimestamp(cd.modified) component.modifier = user_names_dict[cd.modifier_id] return component def", "id_str)) return result def split_remove_add(item_list): \"\"\"Split one list of items", "mar.auth.effective_ids, mar.perms, issue_project, issue, granted_perms=granted_perms), fieldValues=field_values_list) if issue.closed_timestamp > 0:", "defaultColumns=config.default_col_spec.split(), defaultSorting=config.default_sort_spec.split(), statuses=[convert_status(s) for s in config.well_known_statuses], labels=[convert_label(l) for l", "not field_def: 
logging.warning('Custom field %s of does not exist', fv.fieldName)", "user passed in field value list to FieldValue PB, or", "days_ago = secs_ago / framework_constants.SECS_PER_DAY return api_pb2_v1.AtomPerson( kind='monorail#issuePerson', name=user.email, htmlLink='https://%s/u/%d'", "api_pb2_v1.FieldValueOperator.remove: label_list_remove.append(raw_val) elif fv.operator == api_pb2_v1.FieldValueOperator.add: label_list_add.append(raw_val) else: logging.warning('Unsupported field", "= [] list_to_remove = [] for item in item_list: if", "amendment.field == tracker_pb2.FieldID.BLOCKEDON: result.blockedOn = _append_project( amendment.newvalue, issue.project_name) elif amendment.field", "result.append( services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id)) return result def convert_group_settings(group_name, setting): \"\"\"Convert", "0: resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp) if issue.merged_into: resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0] if", "\"\"\"Convert Monorail Attachment PB to API Attachment.\"\"\" return api_pb2_v1.Attachment( attachmentId=attachment.attachment_id,", "id_str: result.append(id_str) # '-' means this issue is being removed", "result.cc.append(user_email) for user_id in amendment.removed_user_ids: user_email = _get_user_email( services.user, mar.cnxn,", "# '-' means this issue is being removed elif id_str.startswith('-'):", "import framework_helpers from framework import permissions from framework import timestr", "field %s of does not exist', fv.fieldName) continue if fv.operator", "fields are stored as labels if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE: raw_val", "result.blocking = _append_project( amendment.newvalue, issue.project_name) elif amendment.field == tracker_pb2.FieldID.MERGEDINTO: result.mergedInto", "issue is being removed elif id_str.startswith('-'): result.append('-%s:%s' % (project_name, id_str[1:]))", "role): \"\"\"Convert Monorail Project PB to API ProjectWrapper PB.\"\"\" return", "Component PB.\"\"\" project_name = services.project.LookupProjectNames( mar.cnxn, [cd.project_id])[cd.project_id] user_ids = set()", "local_id = int(pair) result.append( services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id)) return result def", "PB to Component PB.\"\"\" project_name = services.project.LookupProjectNames( mar.cnxn, [cd.project_id])[cd.project_id] user_ids", "[name.lower() for name in component_names] result = [] for cd", "not exist.' 
% (ir.projectId, ir.issueId)) return result else: return None", "else: list_to_add.append(item) return list_to_add, list_to_remove # TODO(sheyang): batch the SQL", "user_id) result.cc.append('-%s' % user_email) elif amendment.field == tracker_pb2.FieldID.BLOCKEDON: result.blockedOn =", "in mar.config.field_defs} for fv in field_values: field_def = field_name_dict.get(fv.fieldName) if", "api_pb2_v1.Component( componentId=cd.component_id, projectName=project_name, componentPath=cd.path, description=cd.docstring, admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]),", "if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE: raw_val = '%s-%s' % (fv.fieldName, fv.fieldValue)", "api_pb2_v1.IssueRef( issueId=issue.local_id, projectId=issue.project_name, kind='monorail#issueRef') result.append(issue_ref) return result def convert_issueref_pbs(issueref_pbs, mar,", "issue.status_modified_timestamp: resp.status_modified = datetime.datetime.fromtimestamp( issue.status_modified_timestamp) if issue.component_modified_timestamp: resp.component_modified = datetime.datetime.fromtimestamp(", "amendment.newvalue elif amendment.field == tracker_pb2.FieldID.COMPONENTS: result.components = amendment.newvalue.split() elif amendment.field", "config.well_known_statuses], labels=[convert_label(l) for l in config.well_known_labels], prompts=[convert_template(t) for t in", "':' in id_str: result.append(id_str) # '-' means this issue is", "val = _get_user_email( services.user, mar.cnxn, fv.user_id) elif fv.str_value: val =", "+ [cd.modifier_id]) user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids)) component = api_pb2_v1.Component( componentId=cd.component_id,", "for user_id in amendment.added_user_ids: user_email = _get_user_email( services.user, mar.cnxn, user_id)", "fv.field_id, issue_project.project_name) continue val = None if fv.user_id: val =", "continue # Enum fields are stored as labels if field_def.field_type", "issue.project_id) component_list = [] for cd in config.component_defs: cid =", "config.well_known_labels], prompts=[convert_template(t) for t in config.templates], defaultPromptForMembers=config.default_template_for_developers, defaultPromptForNonMembers=config.default_template_for_users) def convert_status(status):", "ex days_ago = None if user.last_visit_timestamp: secs_ago = int(time.time()) -", "framework_constants.NO_USER_NAME else: result.owner = _get_user_email( services.user, mar.cnxn, amendment.added_user_ids[0]) elif amendment.field", "granted_perms=granted_perms), fieldValues=field_values_list) if issue.closed_timestamp > 0: resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp) if", "amendment.field == tracker_pb2.FieldID.BLOCKING: result.blocking = _append_project( amendment.newvalue, issue.project_name) elif amendment.field", "fv.fieldValue) except user_svc.NoSuchUserException: new_fv.user_id = 0 elif field_def.field_type == tracker_pb2.FieldTypes.STR_TYPE:", "groupName=group_name, who_can_view_members=setting.who_can_view_members, ext_group_type=setting.ext_group_type, last_sync_time=setting.last_sync_time) def convert_component_def(cd, mar, services): \"\"\"Convert ComponentDef", "ProjectIssueConfig PB.\"\"\" return api_pb2_v1.ProjectIssueConfig( kind='monorail#projectIssueConfig', restrictToKnown=config.restrict_to_known, defaultColumns=config.default_col_spec.split(), defaultSorting=config.default_sort_spec.split(), statuses=[convert_status(s) for", "= project_id local_id = int(pair) result.append( 
services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id)) return", "[cd.creator_id] + [cd.modifier_id]) user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids)) component = api_pb2_v1.Component(", "starred=services.issue_star.IsItemStarredBy( mar.cnxn, issue.issue_id, mar.auth.user_id), status=issue.status, state=(api_pb2_v1.IssueState.open if tracker_helpers.MeansOpenInProject( tracker_bizobj.GetStatus(issue), config)", "for id_str in id_list: if ':' in id_str: result.append(id_str) #", "val = fv.str_value elif fv.int_value: val = str(fv.int_value) new_fv =", "services.issue.GetIssues(mar.cnxn, issue_ids) result = [] for issue in issues: issue_ref", "labels=[convert_label(l) for l in config.well_known_labels], prompts=[convert_template(t) for t in config.templates],", "config.field_defs} for fv in issue.field_values: field_name = field_id_dict.get(fv.field_id) if not", "to API IssueCommentWrapper.\"\"\" can_delete = permissions.CanDelete( mar.auth.user_id, mar.auth.effective_ids, mar.perms, comment.deleted_by,", "== 0: result.owner = framework_constants.NO_USER_NAME else: result.owner = _get_user_email( services.user,", "try: user_email = user_service.LookupUserEmail( cnxn, user_id) if not user_email: user_email", "services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id)) return result def convert_group_settings(group_name, setting): \"\"\"Convert UserGroupSettings", "to UserGroupSettingsWrapper.\"\"\" return api_pb2_v1.UserGroupSettingsWrapper( groupName=group_name, who_can_view_members=setting.who_can_view_members, ext_group_type=setting.ext_group_type, last_sync_time=setting.last_sync_time) def convert_component_def(cd,", "componentRequired=template.component_required) def convert_person(user_id, cnxn, services, trap_exception=False): \"\"\"Convert user id to", "in field value list to FieldValue PB, or labels.\"\"\" fv_list_add", "Attachment PB to API Attachment.\"\"\" return api_pb2_v1.Attachment( attachmentId=attachment.attachment_id, fileName=attachment.filename, fileSize=attachment.filesize,", "from services import project_svc from services import user_svc from tracker", "# license that can be found in the LICENSE file", "resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0] if issue.owner_modified_timestamp: resp.owner_modified = datetime.datetime.fromtimestamp( issue.owner_modified_timestamp) if", "resp.owner_modified = datetime.datetime.fromtimestamp( issue.owner_modified_timestamp) if issue.status_modified_timestamp: resp.status_modified = datetime.datetime.fromtimestamp( issue.status_modified_timestamp)", "the SQL queries to fetch projects/issues. 
<gh_stars>1-10
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd

"""Convert Monorail PB objects to API PB objects"""

import datetime
import logging
import time

from framework import framework_constants
from framework import framework_helpers
from framework import permissions
from framework import timestr
from proto import api_pb2_v1
from proto import project_pb2
from proto import tracker_pb2
from services import issue_svc
from services import project_svc
from services import user_svc
from tracker import tracker_bizobj
from tracker import tracker_helpers


def convert_project(project, config, role):
  """Convert Monorail Project PB to API ProjectWrapper PB."""

  return api_pb2_v1.ProjectWrapper(
      kind='monorail#project',
      name=project.project_name,
      externalId=project.project_name,
      htmlLink='/p/%s/' % project.project_name,
      summary=project.summary,
      description=project.description,
      role=role,
      issuesConfig=convert_project_config(config))


def convert_project_config(config):
  """Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB."""

  return api_pb2_v1.ProjectIssueConfig(
      kind='monorail#projectIssueConfig',
      restrictToKnown=config.restrict_to_known,
      defaultColumns=config.default_col_spec.split(),
      defaultSorting=config.default_sort_spec.split(),
      statuses=[convert_status(s) for s in config.well_known_statuses],
      labels=[convert_label(l) for l in config.well_known_labels],
      prompts=[convert_template(t) for t in config.templates],
      defaultPromptForMembers=config.default_template_for_developers,
      defaultPromptForNonMembers=config.default_template_for_users)


def convert_status(status):
  """Convert Monorail StatusDef PB to API Status PB."""

  return api_pb2_v1.Status(
      status=status.status,
      meansOpen=status.means_open,
      description=status.status_docstring)


def convert_label(label):
  """Convert Monorail LabelDef PB to API Label PB."""

  return api_pb2_v1.Label(
      label=label.label,
      description=label.label_docstring)


def convert_template(template):
  """Convert Monorail TemplateDef PB to API Prompt PB."""

  return api_pb2_v1.Prompt(
      name=template.name,
      title=template.summary,
      description=template.content,
      titleMustBeEdited=template.summary_must_be_edited,
      status=template.status,
      labels=template.labels,
      membersOnly=template.members_only,
      defaultToMember=template.owner_defaults_to_member,
      componentRequired=template.component_required)


def convert_person(user_id, cnxn, services, trap_exception=False):
  """Convert user id to API AtomPerson PB."""

  if not user_id:
    return None
  try:
    user = services.user.GetUser(cnxn, user_id)
  except user_svc.NoSuchUserException as ex:
    if trap_exception:
      logging.warning(str(ex))
      return None
    else:
      raise ex

  days_ago = None
  if user.last_visit_timestamp:
    secs_ago = int(time.time()) - user.last_visit_timestamp
    days_ago = secs_ago / framework_constants.SECS_PER_DAY
  return api_pb2_v1.AtomPerson(
      kind='monorail#issuePerson',
      name=user.email,
      htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id),
      last_visit_days_ago=days_ago,
      email_bouncing=bool(user.email_bounce_timestamp),
      vacation_message=user.vacation_message)


def convert_issue_ids(issue_ids, mar, services):
  """Convert global issue ids to API IssueRef PB."""

  # Missed issue ids are filtered out.
  issues = services.issue.GetIssues(mar.cnxn, issue_ids)
  result = []
  for issue in issues:
    issue_ref = api_pb2_v1.IssueRef(
        issueId=issue.local_id,
        projectId=issue.project_name,
        kind='monorail#issueRef')
    result.append(issue_ref)
  return result


def convert_issueref_pbs(issueref_pbs, mar, services):
  """Convert API IssueRef PBs to global issue ids."""

  if issueref_pbs:
    result = []
    for ir in issueref_pbs:
      project_id = mar.project_id
      if ir.projectId:
        project = services.project.GetProjectByName(
            mar.cnxn, ir.projectId)
        if project:
          project_id = project.project_id
      try:
        issue = services.issue.GetIssueByLocalID(
            mar.cnxn, project_id, ir.issueId)
        result.append(issue.issue_id)
      except issue_svc.NoSuchIssueException:
        logging.warning(
            'Issue (%s:%d) does not exist.' % (ir.projectId, ir.issueId))
    return result
  else:
    return None


def convert_issue(cls, issue, mar, services):
  """Convert Monorail Issue PB to API IssuesGetInsertResponse."""

  config = services.config.GetProjectConfig(mar.cnxn, issue.project_id)
  granted_perms = tracker_bizobj.GetGrantedPerms(
      issue, mar.auth.effective_ids, config)
  issue_project = services.project.GetProject(mar.cnxn, issue.project_id)
  component_list = []
  for cd in config.component_defs:
    cid = cd.component_id
    if cid in issue.component_ids:
      component_list.append(cd.path)
  cc_list = [convert_person(p, mar.cnxn, services) for p in issue.cc_ids]
  cc_list = [p for p in cc_list if p is not None]
  field_values_list = []
  field_id_dict = {
      fd.field_id: fd.field_name for fd in config.field_defs}
  for fv in issue.field_values:
    field_name = field_id_dict.get(fv.field_id)
    if not field_name:
      logging.warning('Custom field %d of project %s does not exist',
                      fv.field_id, issue_project.project_name)
      continue
    val = None
    if fv.user_id:
      val = _get_user_email(
          services.user, mar.cnxn, fv.user_id)
    elif fv.str_value:
      val = fv.str_value
    elif fv.int_value:
      val = str(fv.int_value)
    new_fv = api_pb2_v1.FieldValue(
        fieldName=field_name,
        fieldValue=val,
        derived=fv.derived)
    field_values_list.append(new_fv)
  resp = cls(
      kind='monorail#issue',
      id=issue.local_id,
      title=issue.summary,
      summary=issue.summary,
      projectId=issue_project.project_name,
      stars=issue.star_count,
      starred=services.issue_star.IsItemStarredBy(
          mar.cnxn, issue.issue_id, mar.auth.user_id),
      status=issue.status,
      state=(api_pb2_v1.IssueState.open if
             tracker_helpers.MeansOpenInProject(
                 tracker_bizobj.GetStatus(issue), config)
             else api_pb2_v1.IssueState.closed),
      labels=issue.labels,
      components=component_list,
      author=convert_person(issue.reporter_id, mar.cnxn, services),
      owner=convert_person(issue.owner_id, mar.cnxn, services),
      cc=cc_list,
      updated=datetime.datetime.fromtimestamp(issue.modified_timestamp),
      published=datetime.datetime.fromtimestamp(issue.opened_timestamp),
      blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services),
      blocking=convert_issue_ids(issue.blocking_iids, mar, services),
      canComment=permissions.CanCommentIssue(
          mar.auth.effective_ids, mar.perms, issue_project, issue,
          granted_perms=granted_perms),
      canEdit=permissions.CanEditIssue(
          mar.auth.effective_ids, mar.perms, issue_project, issue,
          granted_perms=granted_perms),
      fieldValues=field_values_list)
  if issue.closed_timestamp > 0:
    resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp)
  if issue.merged_into:
    resp.mergedInto = convert_issue_ids([issue.merged_into], mar, services)[0]
  if issue.owner_modified_timestamp:
    resp.owner_modified = datetime.datetime.fromtimestamp(
        issue.owner_modified_timestamp)
  if issue.status_modified_timestamp:
    resp.status_modified = datetime.datetime.fromtimestamp(
        issue.status_modified_timestamp)
  if issue.component_modified_timestamp:
    resp.component_modified = datetime.datetime.fromtimestamp(
        issue.component_modified_timestamp)
  return resp


def convert_comment(issue, comment, mar, services, granted_perms):
  """Convert Monorail IssueComment PB to API IssueCommentWrapper."""

  can_delete = permissions.CanDelete(
      mar.auth.user_id, mar.auth.effective_ids, mar.perms,
      comment.deleted_by, comment.user_id, mar.project,
      permissions.GetRestrictions(issue), granted_perms=granted_perms)

  return api_pb2_v1.IssueCommentWrapper(
      attachments=[convert_attachment(a) for a in comment.attachments],
      author=convert_person(comment.user_id, mar.cnxn, services,
                            trap_exception=True),
      canDelete=can_delete,
      content=comment.content,
      deletedBy=convert_person(comment.deleted_by, mar.cnxn, services,
                               trap_exception=True),
      id=comment.sequence,
      published=datetime.datetime.fromtimestamp(comment.timestamp),
      updates=convert_amendments(issue, comment.amendments, mar, services),
      kind='monorail#issueComment')


def convert_attachment(attachment):
  """Convert Monorail Attachment PB to API Attachment."""

  return api_pb2_v1.Attachment(
      attachmentId=attachment.attachment_id,
      fileName=attachment.filename,
      fileSize=attachment.filesize,
      mimetype=attachment.mimetype,
      isDeleted=attachment.deleted)


def convert_amendments(issue, amendments, mar, services):
  """Convert a list of Monorail Amendment PBs to API Update."""

  result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate')
  for amendment in amendments:
    if amendment.field == tracker_pb2.FieldID.SUMMARY:
      result.summary = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.STATUS:
      result.status = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.OWNER:
      if len(amendment.added_user_ids) == 0:
        result.owner = framework_constants.NO_USER_NAME
      else:
        result.owner = _get_user_email(
            services.user, mar.cnxn, amendment.added_user_ids[0])
    elif amendment.field == tracker_pb2.FieldID.LABELS:
      result.labels = amendment.newvalue.split()
    elif amendment.field == tracker_pb2.FieldID.CC:
      for user_id in amendment.added_user_ids:
        user_email = _get_user_email(
            services.user, mar.cnxn, user_id)
        result.cc.append(user_email)
      for user_id in amendment.removed_user_ids:
        user_email = _get_user_email(
            services.user, mar.cnxn, user_id)
        result.cc.append('-%s' % user_email)
    elif amendment.field == tracker_pb2.FieldID.BLOCKEDON:
      result.blockedOn = _append_project(
          amendment.newvalue, issue.project_name)
    elif amendment.field == tracker_pb2.FieldID.BLOCKING:
      result.blocking = _append_project(
          amendment.newvalue, issue.project_name)
    elif amendment.field == tracker_pb2.FieldID.MERGEDINTO:
      result.mergedInto = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.COMPONENTS:
      result.components = amendment.newvalue.split()
    elif amendment.field == tracker_pb2.FieldID.CUSTOM:
      fv = api_pb2_v1.FieldValue()
      fv.fieldName = amendment.custom_field_name
      fv.fieldValue = amendment.newvalue
      result.fieldValues.append(fv)

  return result


def _get_user_email(user_service, cnxn, user_id):
  """Get user email."""

  try:
    user_email = user_service.LookupUserEmail(
        cnxn, user_id)
    if not user_email:
      user_email = framework_constants.DELETED_USER_NAME
  except user_svc.NoSuchUserException:
    user_email = framework_constants.DELETED_USER_NAME
  return user_email


def _append_project(issue_ids, project_name):
  """Append project name to convert <id> to <project>:<id> format."""

  result = []
  id_list = issue_ids.split()
  for id_str in id_list:
    if ':' in id_str:
      result.append(id_str)
    # '-' means this issue is being removed
    elif id_str.startswith('-'):
      result.append('-%s:%s' % (project_name, id_str[1:]))
    else:
      result.append('%s:%s' % (project_name, id_str))
  return result


def split_remove_add(item_list):
  """Split one list of items into two: items to add and items to remove."""

  list_to_add = []
  list_to_remove = []

  for item in item_list:
    if item.startswith('-'):
      list_to_remove.append(item[1:])
    else:
      list_to_add.append(item)

  return list_to_add, list_to_remove


# TODO(sheyang): batch the SQL queries to fetch projects/issues.
def issue_global_ids(project_local_id_pairs, project_id, mar, services):
  """Find global issue ids given <project_name>:<issue_local_id> pairs."""

  result = []
  for pair in project_local_id_pairs:
    issue_project_id = None
    local_id = None
    if ':' in pair:
      pair_ary = pair.split(':')
      project_name = pair_ary[0]
      local_id = int(pair_ary[1])
      project = services.project.GetProjectByName(mar.cnxn, project_name)
      if not project:
        raise project_svc.NoSuchProjectException(
            'Project %s does not exist' % project_name)
      issue_project_id = project.project_id
    else:
      issue_project_id = project_id
      local_id = int(pair)
    result.append(
        services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id))

  return result


def convert_group_settings(group_name, setting):
  """Convert UserGroupSettings to UserGroupSettingsWrapper."""
  return api_pb2_v1.UserGroupSettingsWrapper(
      groupName=group_name,
      who_can_view_members=setting.who_can_view_members,
      ext_group_type=setting.ext_group_type,
      last_sync_time=setting.last_sync_time)


def convert_component_def(cd, mar, services):
  """Convert ComponentDef PB to Component PB."""
  project_name = services.project.LookupProjectNames(
      mar.cnxn, [cd.project_id])[cd.project_id]
  user_ids = set()
  user_ids.update(
      cd.admin_ids + cd.cc_ids + [cd.creator_id] + [cd.modifier_id])
  user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids))
  component = api_pb2_v1.Component(
      componentId=cd.component_id,
      projectName=project_name,
      componentPath=cd.path,
      description=cd.docstring,
      admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]),
      cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]),
      deprecated=cd.deprecated)
  if cd.created:
    component.created = datetime.datetime.fromtimestamp(cd.created)
    component.creator = user_names_dict[cd.creator_id]
  if cd.modified:
    component.modified = datetime.datetime.fromtimestamp(cd.modified)
    component.modifier = user_names_dict[cd.modifier_id]
  return component


def convert_component_ids(config, component_names):
  """Convert a list of component names to ids."""
  component_names_lower = [name.lower() for name in component_names]
  result = []
  for cd in config.component_defs:
    cpath = cd.path
    if cpath.lower() in component_names_lower:
      result.append(cd.component_id)
  return result


def convert_field_values(field_values, mar, services):
  """Convert user-passed-in field value list to FieldValue PBs, or labels."""
  fv_list_add = []
  fv_list_remove = []
  fv_list_clear = []
  label_list_add = []
  label_list_remove = []
  field_name_dict = {
      fd.field_name: fd for fd in mar.config.field_defs}

  for fv in field_values:
    field_def = field_name_dict.get(fv.fieldName)
    if not field_def:
      logging.warning('Custom field %s does not exist', fv.fieldName)
      continue

    if fv.operator == api_pb2_v1.FieldValueOperator.clear:
      fv_list_clear.append(field_def.field_id)
      continue

    # Enum fields are stored as labels
    if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE:
      raw_val = '%s-%s' % (fv.fieldName, fv.fieldValue)
      if fv.operator == api_pb2_v1.FieldValueOperator.remove:
        label_list_remove.append(raw_val)
      elif fv.operator == api_pb2_v1.FieldValueOperator.add:
        label_list_add.append(raw_val)
      else:
        logging.warning('Unsupported field value operator %s', fv.operator)
    else:
      new_fv = tracker_pb2.FieldValue(
          field_id=field_def.field_id)

      if field_def.field_type == tracker_pb2.FieldTypes.USER_TYPE:
        try:
          new_fv.user_id = services.user.LookupUserID(mar.cnxn, fv.fieldValue)
        except user_svc.NoSuchUserException:
          new_fv.user_id = 0
      elif field_def.field_type == tracker_pb2.FieldTypes.STR_TYPE:
        new_fv.str_value = fv.fieldValue
      elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE:
        new_fv.int_value = int(fv.fieldValue)
      else:
        logging.warning(
            'Unsupported field value type %s', field_def.field_type)

      if fv.operator == api_pb2_v1.FieldValueOperator.remove:
        fv_list_remove.append(new_fv)
      elif fv.operator == api_pb2_v1.FieldValueOperator.add:
        fv_list_add.append(new_fv)
      else:
        logging.warning('Unsupported field value operator %s', fv.operator)

  return (fv_list_add, fv_list_remove, fv_list_clear,
          label_list_add, label_list_remove)
def issue_global_ids(project_local_id_pairs,", "who_can_view_members=setting.who_can_view_members, ext_group_type=setting.ext_group_type, last_sync_time=setting.last_sync_time) def convert_component_def(cd, mar, services): \"\"\"Convert ComponentDef PB", "tracker_helpers def convert_project(project, config, role): \"\"\"Convert Monorail Project PB to", "tracker_bizobj.GetGrantedPerms( issue, mar.auth.effective_ids, config) issue_project = services.project.GetProject(mar.cnxn, issue.project_id) component_list =", "global issues ids given <project_name>:<issue_local_id> pairs.\"\"\" result = [] for", "LabelDef PB to API Label PB.\"\"\" return api_pb2_v1.Label( label=label.label, description=label.label_docstring)", "user_svc.NoSuchUserException: user_email = framework_constants.DELETED_USER_NAME return user_email def _append_project(issue_ids, project_name): \"\"\"Append", "name=user.email, htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id), last_visit_days_ago=days_ago, email_bouncing=bool(user.email_bounce_timestamp), vacation_message=user.vacation_message) def convert_issue_ids(issue_ids,", "import framework_constants from framework import framework_helpers from framework import permissions", "for pair in project_local_id_pairs: issue_project_id = None local_id = None", "convert_template(template): \"\"\"Convert Monorail TemplateDef PB to API Prompt PB.\"\"\" return", "issue_project = services.project.GetProject(mar.cnxn, issue.project_id) component_list = [] for cd in", "not field_name: logging.warning('Custom field %d of project %s does not", "% (fv.fieldName, fv.fieldValue) if fv.operator == api_pb2_v1.FieldValueOperator.remove: label_list_remove.append(raw_val) elif fv.operator", "project_pb2 from proto import tracker_pb2 from services import issue_svc from", "in cd.cc_ids]), deprecated=cd.deprecated) if cd.created: component.created = datetime.datetime.fromtimestamp(cd.created) component.creator =", "mar.perms, issue_project, issue, granted_perms=granted_perms), fieldValues=field_values_list) if issue.closed_timestamp > 0: resp.closed", "issue, mar, services): \"\"\"Convert Monorail Issue PB to API IssuesGetInsertResponse.\"\"\"", "result.summary = amendment.newvalue elif amendment.field == tracker_pb2.FieldID.STATUS: result.status = amendment.newvalue", "mar.auth.user_id), status=issue.status, state=(api_pb2_v1.IssueState.open if tracker_helpers.MeansOpenInProject( tracker_bizobj.GetStatus(issue), config) else api_pb2_v1.IssueState.closed), labels=issue.labels,", "canComment=permissions.CanCommentIssue( mar.auth.effective_ids, mar.perms, issue_project, issue, granted_perms=granted_perms), canEdit=permissions.CanEditIssue( mar.auth.effective_ids, mar.perms, issue_project,", "= user_service.LookupUserEmail( cnxn, user_id) if not user_email: user_email = framework_constants.DELETED_USER_NAME", "services): \"\"\"Convert Monorail Issue PB to API IssuesGetInsertResponse.\"\"\" config =", "_get_user_email( services.user, mar.cnxn, user_id) result.cc.append('-%s' % user_email) elif amendment.field ==", "fd in mar.config.field_defs} for fv in field_values: field_def = field_name_dict.get(fv.fieldName)", "def convert_component_def(cd, mar, services): \"\"\"Convert ComponentDef PB to Component PB.\"\"\"", "to API Label PB.\"\"\" return api_pb2_v1.Label( label=label.label, description=label.label_docstring) def convert_template(template):", "amendment.newvalue result.fieldValues.append(fv) return result def _get_user_email(user_service, cnxn, user_id): \"\"\"Get user", 
"updates=convert_amendments(issue, comment.amendments, mar, services), kind='monorail#issueComment') def convert_attachment(attachment): \"\"\"Convert Monorail Attachment", "cd.cc_ids]), deprecated=cd.deprecated) if cd.created: component.created = datetime.datetime.fromtimestamp(cd.created) component.creator = user_names_dict[cd.creator_id]", "return result def convert_group_settings(group_name, setting): \"\"\"Convert UserGroupSettings to UserGroupSettingsWrapper.\"\"\" return", "Issue PB to API IssuesGetInsertResponse.\"\"\" config = services.config.GetProjectConfig(mar.cnxn, issue.project_id) granted_perms", "val = str(fv.int_value) new_fv = api_pb2_v1.FieldValue( fieldName=field_name, fieldValue=val, derived=fv.derived) field_values_list.append(new_fv)", "issue.component_modified_timestamp: resp.component_modified = datetime.datetime.fromtimestamp( issue.component_modified_timestamp) return resp def convert_comment(issue, comment,", "0: result.owner = framework_constants.NO_USER_NAME else: result.owner = _get_user_email( services.user, mar.cnxn,", "\"\"\"Convert global issue ids to API IssueRef PB.\"\"\" # missed", "%s does not exist' % project_name) issue_project_id = project.project_id else:", "convert_component_ids(config, component_names): \"\"\"Convert a list of component names to ids.\"\"\"", "= services.issue.GetIssueByLocalID( mar.cnxn, project_id, ir.issueId) result.append(issue.issue_id) except issue_svc.NoSuchIssueException: logging.warning( 'Issue", "amendment.field == tracker_pb2.FieldID.OWNER: if len(amendment.added_user_ids) == 0: result.owner = framework_constants.NO_USER_NAME", "[] fv_list_clear = [] label_list_add = [] label_list_remove = []", "= str(fv.int_value) new_fv = api_pb2_v1.FieldValue( fieldName=field_name, fieldValue=val, derived=fv.derived) field_values_list.append(new_fv) resp", "amendments, mar, services): \"\"\"Convert a list of Monorail Amendment PBs", "mar, services)[0] if issue.owner_modified_timestamp: resp.owner_modified = datetime.datetime.fromtimestamp( issue.owner_modified_timestamp) if issue.status_modified_timestamp:", "objects to API PB objects\"\"\" import datetime import logging import", "title=issue.summary, summary=issue.summary, projectId=issue_project.project_name, stars=issue.star_count, starred=services.issue_star.IsItemStarredBy( mar.cnxn, issue.issue_id, mar.auth.user_id), status=issue.status, state=(api_pb2_v1.IssueState.open", "user_email) elif amendment.field == tracker_pb2.FieldID.BLOCKEDON: result.blockedOn = _append_project( amendment.newvalue, issue.project_name)", "IssueCommentWrapper.\"\"\" can_delete = permissions.CanDelete( mar.auth.user_id, mar.auth.effective_ids, mar.perms, comment.deleted_by, comment.user_id, mar.project,", "None] field_values_list = [] field_id_dict = { fd.field_id: fd.field_name for", "cc=cc_list, updated=datetime.datetime.fromtimestamp(issue.modified_timestamp), published=datetime.datetime.fromtimestamp(issue.opened_timestamp), blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services), blocking=convert_issue_ids(issue.blocking_iids, mar, services), canComment=permissions.CanCommentIssue(", "if cpath.lower() in component_names_lower: result.append(cd.component_id) return result def convert_field_values(field_values, mar,", "= fv.fieldValue elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE: new_fv.int_value = int(fv.fieldValue) else:", "list_to_add = [] list_to_remove = [] for item in item_list:", "api_pb2_v1.AtomPerson( kind='monorail#issuePerson', name=user.email, 
htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id), last_visit_days_ago=days_ago, email_bouncing=bool(user.email_bounce_timestamp), vacation_message=user.vacation_message)", "a list of Monorail Amendment PBs to API Update.\"\"\" result", "<project_name>:<issue_local_id> pairs.\"\"\" result = [] for pair in project_local_id_pairs: issue_project_id", "amendment.field == tracker_pb2.FieldID.STATUS: result.status = amendment.newvalue elif amendment.field == tracker_pb2.FieldID.OWNER:", "deprecated=cd.deprecated) if cd.created: component.created = datetime.datetime.fromtimestamp(cd.created) component.creator = user_names_dict[cd.creator_id] if", "new_fv = tracker_pb2.FieldValue( field_id=field_def.field_id) if field_def.field_type == tracker_pb2.FieldTypes.USER_TYPE: try: new_fv.user_id", "_append_project( amendment.newvalue, issue.project_name) elif amendment.field == tracker_pb2.FieldID.BLOCKING: result.blocking = _append_project(", "for p in cc_list if p is not None] field_values_list", "to remove.\"\"\" list_to_add = [] list_to_remove = [] for item", "a list of component names to ids.\"\"\" component_names_lower = [name.lower()", "mar, services): \"\"\"Convert a list of Monorail Amendment PBs to", "for fd in mar.config.field_defs} for fv in field_values: field_def =", "= datetime.datetime.fromtimestamp( issue.component_modified_timestamp) return resp def convert_comment(issue, comment, mar, services,", "\"\"\"Split one list of items into two: items to add", "if fv.operator == api_pb2_v1.FieldValueOperator.remove: label_list_remove.append(raw_val) elif fv.operator == api_pb2_v1.FieldValueOperator.add: label_list_add.append(raw_val)", "new_fv.int_value = int(fv.fieldValue) else: logging.warning( 'Unsupported field value type %s',", "cd.cc_ids + [cd.creator_id] + [cd.modifier_id]) user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids)) component", "mar.cnxn, amendment.added_user_ids[0]) elif amendment.field == tracker_pb2.FieldID.LABELS: result.labels = amendment.newvalue.split() elif", "source code is govered by a BSD-style # license that", "ids given <project_name>:<issue_local_id> pairs.\"\"\" result = [] for pair in", "issue_ref = api_pb2_v1.IssueRef( issueId=issue.local_id, projectId=issue.project_name, kind='monorail#issueRef') result.append(issue_ref) return result def", "tracker_pb2.FieldID.CC: for user_id in amendment.added_user_ids: user_email = _get_user_email( services.user, mar.cnxn,", "import project_pb2 from proto import tracker_pb2 from services import issue_svc", "stored as labels if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE: raw_val = '%s-%s'", "amendment.field == tracker_pb2.FieldID.LABELS: result.labels = amendment.newvalue.split() elif amendment.field == tracker_pb2.FieldID.CC:", "= amendment.newvalue result.fieldValues.append(fv) return result def _get_user_email(user_service, cnxn, user_id): \"\"\"Get", "import timestr from proto import api_pb2_v1 from proto import project_pb2", "services.user.LookupUserID(mar.cnxn, fv.fieldValue) except user_svc.NoSuchUserException: new_fv.user_id = 0 elif field_def.field_type ==", "in config.well_known_labels], prompts=[convert_template(t) for t in config.templates], defaultPromptForMembers=config.default_template_for_developers, defaultPromptForNonMembers=config.default_template_for_users) def", "= None if fv.user_id: val = _get_user_email( services.user, mar.cnxn, fv.user_id)", "Monorail TemplateDef PB to API Prompt PB.\"\"\" return api_pb2_v1.Prompt( name=template.name,", "not 
exist', fv.field_id, issue_project.project_name) continue val = None if fv.user_id:", "return api_pb2_v1.Label( label=label.label, description=label.label_docstring) def convert_template(template): \"\"\"Convert Monorail TemplateDef PB", "mar.cnxn, ir.projectId) if project: project_id = project.project_id try: issue =", "if issue.component_modified_timestamp: resp.component_modified = datetime.datetime.fromtimestamp( issue.component_modified_timestamp) return resp def convert_comment(issue,", "user_id) result.cc.append(user_email) for user_id in amendment.removed_user_ids: user_email = _get_user_email( services.user,", "services): \"\"\"Convert global issue ids to API IssueRef PB.\"\"\" #", "def convert_label(label): \"\"\"Convert Monorail LabelDef PB to API Label PB.\"\"\"", "result = [] for ir in issueref_pbs: project_id = mar.project_id", "convert_amendments(issue, amendments, mar, services): \"\"\"Convert a list of Monorail Amendment", "Monorail PB objects to API PB objects\"\"\" import datetime import", "to API Attachment.\"\"\" return api_pb2_v1.Attachment( attachmentId=attachment.attachment_id, fileName=attachment.filename, fileSize=attachment.filesize, mimetype=attachment.mimetype, isDeleted=attachment.deleted)", "is govered by a BSD-style # license that can be", "project_id = mar.project_id if ir.projectId: project = services.project.GetProjectByName( mar.cnxn, ir.projectId)", "api_pb2_v1.UserGroupSettingsWrapper( groupName=group_name, who_can_view_members=setting.who_can_view_members, ext_group_type=setting.ext_group_type, last_sync_time=setting.last_sync_time) def convert_component_def(cd, mar, services): \"\"\"Convert", "datetime.datetime.fromtimestamp(cd.modified) component.modifier = user_names_dict[cd.modifier_id] return component def convert_component_ids(config, component_names): \"\"\"Convert", "= [] field_name_dict = { fd.field_name: fd for fd in", "if fv.operator == api_pb2_v1.FieldValueOperator.remove: fv_list_remove.append(new_fv) elif fv.operator == api_pb2_v1.FieldValueOperator.add: fv_list_add.append(new_fv)", "projectId=issue.project_name, kind='monorail#issueRef') result.append(issue_ref) return result def convert_issueref_pbs(issueref_pbs, mar, services): \"\"\"Convert", "convert_component_def(cd, mar, services): \"\"\"Convert ComponentDef PB to Component PB.\"\"\" project_name", "secs_ago = int(time.time()) - user.last_visit_timestamp days_ago = secs_ago / framework_constants.SECS_PER_DAY", "mar.cnxn, services, trap_exception=True), canDelete=can_delete, content=comment.content, deletedBy=convert_person(comment.deleted_by, mar.cnxn, services, trap_exception=True), id=comment.sequence,", "logging.warning( 'Issue (%s:%d) does not exist.' 
% (ir.projectId, ir.issueId)) return", "datetime.datetime.fromtimestamp( issue.component_modified_timestamp) return resp def convert_comment(issue, comment, mar, services, granted_perms):", "== tracker_pb2.FieldID.BLOCKEDON: result.blockedOn = _append_project( amendment.newvalue, issue.project_name) elif amendment.field ==", "ids.\"\"\" component_names_lower = [name.lower() for name in component_names] result =", "import time from framework import framework_constants from framework import framework_helpers", "convert_status(status): \"\"\"Convert Monorail StatusDef PB to API Status PB.\"\"\" return", "to API Prompt PB.\"\"\" return api_pb2_v1.Prompt( name=template.name, title=template.summary, description=template.content, titleMustBeEdited=template.summary_must_be_edited,", "fv.fieldName = amendment.custom_field_name fv.fieldValue = amendment.newvalue result.fieldValues.append(fv) return result def", "issue.project_name) elif amendment.field == tracker_pb2.FieldID.BLOCKING: result.blocking = _append_project( amendment.newvalue, issue.project_name)", "result.cc.append('-%s' % user_email) elif amendment.field == tracker_pb2.FieldID.BLOCKEDON: result.blockedOn = _append_project(", "items into two: items to add and items to remove.\"\"\"", "field_values_list.append(new_fv) resp = cls( kind='monorail#issue', id=issue.local_id, title=issue.summary, summary=issue.summary, projectId=issue_project.project_name, stars=issue.star_count,", "== tracker_pb2.FieldTypes.STR_TYPE: new_fv.str_value = fv.fieldValue elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE: new_fv.int_value", "in comment.attachments], author=convert_person(comment.user_id, mar.cnxn, services, trap_exception=True), canDelete=can_delete, content=comment.content, deletedBy=convert_person(comment.deleted_by, mar.cnxn,", "API Update.\"\"\" result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate') for amendment in amendments: if", "list_to_remove.append(item[1:]) else: list_to_add.append(item) return list_to_add, list_to_remove # TODO(sheyang): batch the", "id=comment.sequence, published=datetime.datetime.fromtimestamp(comment.timestamp), updates=convert_amendments(issue, comment.amendments, mar, services), kind='monorail#issueComment') def convert_attachment(attachment): \"\"\"Convert", "def convert_comment(issue, comment, mar, services, granted_perms): \"\"\"Convert Monorail IssueComment PB", "Monorail StatusDef PB to API Status PB.\"\"\" return api_pb2_v1.Status( status=status.status,", "in id_list: if ':' in id_str: result.append(id_str) # '-' means", "Enum fields are stored as labels if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE:", "services)[0] if issue.owner_modified_timestamp: resp.owner_modified = datetime.datetime.fromtimestamp( issue.owner_modified_timestamp) if issue.status_modified_timestamp: resp.status_modified", "API Attachment.\"\"\" return api_pb2_v1.Attachment( attachmentId=attachment.attachment_id, fileName=attachment.filename, fileSize=attachment.filesize, mimetype=attachment.mimetype, isDeleted=attachment.deleted) def", "mar, services): \"\"\"Find global issues ids given <project_name>:<issue_local_id> pairs.\"\"\" result", "services.user, mar.cnxn, user_id) result.cc.append('-%s' % user_email) elif amendment.field == tracker_pb2.FieldID.BLOCKEDON:", "services), blocking=convert_issue_ids(issue.blocking_iids, mar, services), canComment=permissions.CanCommentIssue( mar.auth.effective_ids, mar.perms, issue_project, issue, granted_perms=granted_perms),", "else: logging.warning('Unsupported 
field value operater %s', fv.operator) return (fv_list_add, fv_list_remove,", "framework import timestr from proto import api_pb2_v1 from proto import", "can_delete = permissions.CanDelete( mar.auth.user_id, mar.auth.effective_ids, mar.perms, comment.deleted_by, comment.user_id, mar.project, permissions.GetRestrictions(issue),", "issue_project_id = None local_id = None if ':' in pair:", "cd.created: component.created = datetime.datetime.fromtimestamp(cd.created) component.creator = user_names_dict[cd.creator_id] if cd.modified: component.modified", "issuesConfig=convert_project_config(config)) def convert_project_config(config): \"\"\"Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig", "FieldValue PB, or labels.\"\"\" fv_list_add = [] fv_list_remove = []", "if cd.modified: component.modified = datetime.datetime.fromtimestamp(cd.modified) component.modifier = user_names_dict[cd.modifier_id] return component", "= api_pb2_v1.Update(kind='monorail#issueCommentUpdate') for amendment in amendments: if amendment.field == tracker_pb2.FieldID.SUMMARY:", "services, trap_exception=True), canDelete=can_delete, content=comment.content, deletedBy=convert_person(comment.deleted_by, mar.cnxn, services, trap_exception=True), id=comment.sequence, published=datetime.datetime.fromtimestamp(comment.timestamp),", "services.project.GetProjectByName( mar.cnxn, ir.projectId) if project: project_id = project.project_id try: issue", "in config.component_defs: cid = cd.component_id if cid in issue.component_ids: component_list.append(cd.path)", "== tracker_pb2.FieldID.COMPONENTS: result.components = amendment.newvalue.split() elif amendment.field == tracker_pb2.FieldID.CUSTOM: fv", "convert_field_values(field_values, mar, services): \"\"\"Convert user passed in field value list", "elif amendment.field == tracker_pb2.FieldID.BLOCKING: result.blocking = _append_project( amendment.newvalue, issue.project_name) elif", "amendment.newvalue.split() elif amendment.field == tracker_pb2.FieldID.CUSTOM: fv = api_pb2_v1.FieldValue() fv.fieldName =", "(fv.fieldName, fv.fieldValue) if fv.operator == api_pb2_v1.FieldValueOperator.remove: label_list_remove.append(raw_val) elif fv.operator ==", "p in cc_list if p is not None] field_values_list =", "config.templates], defaultPromptForMembers=config.default_template_for_developers, defaultPromptForNonMembers=config.default_template_for_users) def convert_status(status): \"\"\"Convert Monorail StatusDef PB to", "[p for p in cc_list if p is not None]", "raise ex days_ago = None if user.last_visit_timestamp: secs_ago = int(time.time())", "mar.cnxn, services), owner=convert_person(issue.owner_id, mar.cnxn, services), cc=cc_list, updated=datetime.datetime.fromtimestamp(issue.modified_timestamp), published=datetime.datetime.fromtimestamp(issue.opened_timestamp), blockedOn=convert_issue_ids(issue.blocked_on_iids, mar,", "convert_attachment(attachment): \"\"\"Convert Monorail Attachment PB to API Attachment.\"\"\" return api_pb2_v1.Attachment(", "\"\"\"Convert user id to API AtomPerson PB.\"\"\" if not user_id:", "means this issue is being removed elif id_str.startswith('-'): result.append('-%s:%s' %", "_append_project(issue_ids, project_name): \"\"\"Append project name to convert <id> to <project>:<id>", "componentId=cd.component_id, projectName=project_name, componentPath=cd.path, description=cd.docstring, admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]), cc=sorted([user_names_dict[uid]", "name=template.name, title=template.summary, description=template.content, 
titleMustBeEdited=template.summary_must_be_edited, status=template.status, labels=template.labels, membersOnly=template.members_only, defaultToMember=template.owner_defaults_to_member, componentRequired=template.component_required) def", "as ex: if trap_exception: logging.warning(str(ex)) return None else: raise ex", "API AtomPerson PB.\"\"\" if not user_id: return None try: user", "componentPath=cd.path, description=cd.docstring, admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]), cc=sorted([user_names_dict[uid] for uid", "convert_project_config(config): \"\"\"Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB.\"\"\" return", "field_def = field_name_dict.get(fv.fieldName) if not field_def: logging.warning('Custom field %s of", "= pair_ary[0] local_id = int(pair_ary[1]) project = services.project.GetProjectByName(mar.cnxn, project_name) if", "canEdit=permissions.CanEditIssue( mar.auth.effective_ids, mar.perms, issue_project, issue, granted_perms=granted_perms), fieldValues=field_values_list) if issue.closed_timestamp >", "issue_svc from services import project_svc from services import user_svc from", "> 0: resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp) if issue.merged_into: resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0]", "= fv.str_value elif fv.int_value: val = str(fv.int_value) new_fv = api_pb2_v1.FieldValue(", "result.mergedInto = amendment.newvalue elif amendment.field == tracker_pb2.FieldID.COMPONENTS: result.components = amendment.newvalue.split()", "resp.status_modified = datetime.datetime.fromtimestamp( issue.status_modified_timestamp) if issue.component_modified_timestamp: resp.component_modified = datetime.datetime.fromtimestamp( issue.component_modified_timestamp)", "result def _get_user_email(user_service, cnxn, user_id): \"\"\"Get user email.\"\"\" try: user_email", "id_list: if ':' in id_str: result.append(id_str) # '-' means this", "services), cc=cc_list, updated=datetime.datetime.fromtimestamp(issue.modified_timestamp), published=datetime.datetime.fromtimestamp(issue.opened_timestamp), blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services), blocking=convert_issue_ids(issue.blocking_iids, mar, services),", "id_str[1:])) else: result.append('%s:%s' % (project_name, id_str)) return result def split_remove_add(item_list):", "batch the SQL queries to fetch projects/issues. 
def issue_global_ids(project_local_id_pairs, project_id,", "operater %s', fv.operator) else: new_fv = tracker_pb2.FieldValue( field_id=field_def.field_id) if field_def.field_type", "\"\"\"Convert a list of Monorail Amendment PBs to API Update.\"\"\"", "and items to remove.\"\"\" list_to_add = [] list_to_remove = []", "user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids)) component = api_pb2_v1.Component( componentId=cd.component_id, projectName=project_name, componentPath=cd.path,", "mar, services), kind='monorail#issueComment') def convert_attachment(attachment): \"\"\"Convert Monorail Attachment PB to", "component def convert_component_ids(config, component_names): \"\"\"Convert a list of component names", "Prompt PB.\"\"\" return api_pb2_v1.Prompt( name=template.name, title=template.summary, description=template.content, titleMustBeEdited=template.summary_must_be_edited, status=template.status, labels=template.labels,", "for t in config.templates], defaultPromptForMembers=config.default_template_for_developers, defaultPromptForNonMembers=config.default_template_for_users) def convert_status(status): \"\"\"Convert Monorail", "services.user.LookupUserEmails(mar.cnxn, list(user_ids)) component = api_pb2_v1.Component( componentId=cd.component_id, projectName=project_name, componentPath=cd.path, description=cd.docstring, admin=sorted([user_names_dict[uid]", "StatusDef PB to API Status PB.\"\"\" return api_pb2_v1.Status( status=status.status, meansOpen=status.means_open,", "secs_ago / framework_constants.SECS_PER_DAY return api_pb2_v1.AtomPerson( kind='monorail#issuePerson', name=user.email, htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(),", "user_id) if not user_email: user_email = framework_constants.DELETED_USER_NAME except user_svc.NoSuchUserException: user_email", "to convert <id> to <project>:<id> format.\"\"\" result = [] id_list", "issue_project_id, local_id)) return result def convert_group_settings(group_name, setting): \"\"\"Convert UserGroupSettings to", "project_name) if not project: raise project_svc.NoSuchProjectException( 'Project %s does not", "of this source code is govered by a BSD-style #", "%d of project %s does not exist', fv.field_id, issue_project.project_name) continue", "from tracker import tracker_helpers def convert_project(project, config, role): \"\"\"Convert Monorail", "fieldValue=val, derived=fv.derived) field_values_list.append(new_fv) resp = cls( kind='monorail#issue', id=issue.local_id, title=issue.summary, summary=issue.summary,", "is being removed elif id_str.startswith('-'): result.append('-%s:%s' % (project_name, id_str[1:])) else:", "elif amendment.field == tracker_pb2.FieldID.MERGEDINTO: result.mergedInto = amendment.newvalue elif amendment.field ==", "cc_list = [p for p in cc_list if p is", "for fv in issue.field_values: field_name = field_id_dict.get(fv.field_id) if not field_name:", "issue_global_ids(project_local_id_pairs, project_id, mar, services): \"\"\"Find global issues ids given <project_name>:<issue_local_id>", "def convert_project_config(config): \"\"\"Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB.\"\"\"", "'Project %s does not exist' % project_name) issue_project_id = project.project_id", "from framework import permissions from framework import timestr from proto", "field value operater %s', fv.operator) else: new_fv = tracker_pb2.FieldValue( field_id=field_def.field_id)", "PB to API Label PB.\"\"\" return api_pb2_v1.Label( label=label.label, description=label.label_docstring) def", 
"field_def.field_type) if fv.operator == api_pb2_v1.FieldValueOperator.remove: fv_list_remove.append(new_fv) elif fv.operator == api_pb2_v1.FieldValueOperator.add:", "= framework_constants.DELETED_USER_NAME return user_email def _append_project(issue_ids, project_name): \"\"\"Append project name", "cd.admin_ids]), cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]), deprecated=cd.deprecated) if cd.created: component.created", "= services.config.GetProjectConfig(mar.cnxn, issue.project_id) granted_perms = tracker_bizobj.GetGrantedPerms( issue, mar.auth.effective_ids, config) issue_project", "stars=issue.star_count, starred=services.issue_star.IsItemStarredBy( mar.cnxn, issue.issue_id, mar.auth.user_id), status=issue.status, state=(api_pb2_v1.IssueState.open if tracker_helpers.MeansOpenInProject( tracker_bizobj.GetStatus(issue),", "result.append(id_str) # '-' means this issue is being removed elif", "deletedBy=convert_person(comment.deleted_by, mar.cnxn, services, trap_exception=True), id=comment.sequence, published=datetime.datetime.fromtimestamp(comment.timestamp), updates=convert_amendments(issue, comment.amendments, mar, services),", "tracker_pb2.FieldValue( field_id=field_def.field_id) if field_def.field_type == tracker_pb2.FieldTypes.USER_TYPE: try: new_fv.user_id = services.user.LookupUserID(mar.cnxn,", "elif amendment.field == tracker_pb2.FieldID.OWNER: if len(amendment.added_user_ids) == 0: result.owner =", "import project_svc from services import user_svc from tracker import tracker_bizobj", "AtomPerson PB.\"\"\" if not user_id: return None try: user =", "ProjectIssueConfig PB to API ProjectIssueConfig PB.\"\"\" return api_pb2_v1.ProjectIssueConfig( kind='monorail#projectIssueConfig', restrictToKnown=config.restrict_to_known,", "attachments=[convert_attachment(a) for a in comment.attachments], author=convert_person(comment.user_id, mar.cnxn, services, trap_exception=True), canDelete=can_delete,", "user_names_dict[cd.creator_id] if cd.modified: component.modified = datetime.datetime.fromtimestamp(cd.modified) component.modifier = user_names_dict[cd.modifier_id] return", "{ fd.field_id: fd.field_name for fd in config.field_defs} for fv in", "else api_pb2_v1.IssueState.closed), labels=issue.labels, components=component_list, author=convert_person(issue.reporter_id, mar.cnxn, services), owner=convert_person(issue.owner_id, mar.cnxn, services),", "in id_str: result.append(id_str) # '-' means this issue is being", "mimetype=attachment.mimetype, isDeleted=attachment.deleted) def convert_amendments(issue, amendments, mar, services): \"\"\"Convert a list", "issue_ids.split() for id_str in id_list: if ':' in id_str: result.append(id_str)", "project_local_id_pairs: issue_project_id = None local_id = None if ':' in", "\"\"\"Convert ComponentDef PB to Component PB.\"\"\" project_name = services.project.LookupProjectNames( mar.cnxn,", "config.component_defs: cid = cd.component_id if cid in issue.component_ids: component_list.append(cd.path) cc_list", "\"\"\"Convert Monorail Project PB to API ProjectWrapper PB.\"\"\" return api_pb2_v1.ProjectWrapper(", "to API Update.\"\"\" result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate') for amendment in amendments:", "_get_user_email( services.user, mar.cnxn, amendment.added_user_ids[0]) elif amendment.field == tracker_pb2.FieldID.LABELS: result.labels =", "tracker_pb2.FieldID.BLOCKING: result.blocking = _append_project( amendment.newvalue, issue.project_name) elif amendment.field == tracker_pb2.FieldID.MERGEDINTO:", "issueref_pbs: result = [] 
for ir in issueref_pbs: project_id =", "project_svc.NoSuchProjectException( 'Project %s does not exist' % project_name) issue_project_id =", "kind='monorail#issueRef') result.append(issue_ref) return result def convert_issueref_pbs(issueref_pbs, mar, services): \"\"\"Convert API", "= project.project_id try: issue = services.issue.GetIssueByLocalID( mar.cnxn, project_id, ir.issueId) result.append(issue.issue_id)", "mar, services), canComment=permissions.CanCommentIssue( mar.auth.effective_ids, mar.perms, issue_project, issue, granted_perms=granted_perms), canEdit=permissions.CanEditIssue( mar.auth.effective_ids,", "for user_id in amendment.removed_user_ids: user_email = _get_user_email( services.user, mar.cnxn, user_id)", "PB.\"\"\" return api_pb2_v1.Status( status=status.status, meansOpen=status.means_open, description=status.status_docstring) def convert_label(label): \"\"\"Convert Monorail", "result.labels = amendment.newvalue.split() elif amendment.field == tracker_pb2.FieldID.CC: for user_id in", "_get_user_email(user_service, cnxn, user_id): \"\"\"Get user email.\"\"\" try: user_email = user_service.LookupUserEmail(", "project_id, mar, services): \"\"\"Find global issues ids given <project_name>:<issue_local_id> pairs.\"\"\"", "[cd.project_id])[cd.project_id] user_ids = set() user_ids.update( cd.admin_ids + cd.cc_ids + [cd.creator_id]", "list_to_remove # TODO(sheyang): batch the SQL queries to fetch projects/issues.", "in amendment.removed_user_ids: user_email = _get_user_email( services.user, mar.cnxn, user_id) result.cc.append('-%s' %", "project: raise project_svc.NoSuchProjectException( 'Project %s does not exist' % project_name)", "t in config.templates], defaultPromptForMembers=config.default_template_for_developers, defaultPromptForNonMembers=config.default_template_for_users) def convert_status(status): \"\"\"Convert Monorail StatusDef", "amendments: if amendment.field == tracker_pb2.FieldID.SUMMARY: result.summary = amendment.newvalue elif amendment.field", "project name to convert <id> to <project>:<id> format.\"\"\" result =", "to API PB objects\"\"\" import datetime import logging import time", "labels=issue.labels, components=component_list, author=convert_person(issue.reporter_id, mar.cnxn, services), owner=convert_person(issue.owner_id, mar.cnxn, services), cc=cc_list, updated=datetime.datetime.fromtimestamp(issue.modified_timestamp),", "import datetime import logging import time from framework import framework_constants", "mar.auth.effective_ids, config) issue_project = services.project.GetProject(mar.cnxn, issue.project_id) component_list = [] for", "description=template.content, titleMustBeEdited=template.summary_must_be_edited, status=template.status, labels=template.labels, membersOnly=template.members_only, defaultToMember=template.owner_defaults_to_member, componentRequired=template.component_required) def convert_person(user_id, cnxn,", "elif amendment.field == tracker_pb2.FieldID.CC: for user_id in amendment.added_user_ids: user_email =", "2016 The Chromium Authors. All rights reserved. 
# Use of", "time from framework import framework_constants from framework import framework_helpers from", "comment.attachments], author=convert_person(comment.user_id, mar.cnxn, services, trap_exception=True), canDelete=can_delete, content=comment.content, deletedBy=convert_person(comment.deleted_by, mar.cnxn, services,", "tracker_pb2.FieldID.CUSTOM: fv = api_pb2_v1.FieldValue() fv.fieldName = amendment.custom_field_name fv.fieldValue = amendment.newvalue", "pair_ary[0] local_id = int(pair_ary[1]) project = services.project.GetProjectByName(mar.cnxn, project_name) if not", "The Chromium Authors. All rights reserved. # Use of this", "= [] for pair in project_local_id_pairs: issue_project_id = None local_id", "result def convert_group_settings(group_name, setting): \"\"\"Convert UserGroupSettings to UserGroupSettingsWrapper.\"\"\" return api_pb2_v1.UserGroupSettingsWrapper(", "%s', field_def.field_type) if fv.operator == api_pb2_v1.FieldValueOperator.remove: fv_list_remove.append(new_fv) elif fv.operator ==", "# Use of this source code is govered by a", "Update.\"\"\" result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate') for amendment in amendments: if amendment.field", "= amendment.newvalue elif amendment.field == tracker_pb2.FieldID.COMPONENTS: result.components = amendment.newvalue.split() elif", "else: issue_project_id = project_id local_id = int(pair) result.append( services.issue.LookupIssueID(mar.cnxn, issue_project_id,", "amendment.field == tracker_pb2.FieldID.SUMMARY: result.summary = amendment.newvalue elif amendment.field == tracker_pb2.FieldID.STATUS:", "in issue.component_ids: component_list.append(cd.path) cc_list = [convert_person(p, mar.cnxn, services) for p", "Monorail IssueComment PB to API IssueCommentWrapper.\"\"\" can_delete = permissions.CanDelete( mar.auth.user_id,", "owner=convert_person(issue.owner_id, mar.cnxn, services), cc=cc_list, updated=datetime.datetime.fromtimestamp(issue.modified_timestamp), published=datetime.datetime.fromtimestamp(issue.opened_timestamp), blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services), blocking=convert_issue_ids(issue.blocking_iids,", "\"\"\"Convert a list of component names to ids.\"\"\" component_names_lower =", "author=convert_person(issue.reporter_id, mar.cnxn, services), owner=convert_person(issue.owner_id, mar.cnxn, services), cc=cc_list, updated=datetime.datetime.fromtimestamp(issue.modified_timestamp), published=datetime.datetime.fromtimestamp(issue.opened_timestamp), blockedOn=convert_issue_ids(issue.blocked_on_iids,", "labels=template.labels, membersOnly=template.members_only, defaultToMember=template.owner_defaults_to_member, componentRequired=template.component_required) def convert_person(user_id, cnxn, services, trap_exception=False): \"\"\"Convert", "description=status.status_docstring) def convert_label(label): \"\"\"Convert Monorail LabelDef PB to API Label", "to API IssuesGetInsertResponse.\"\"\" config = services.config.GetProjectConfig(mar.cnxn, issue.project_id) granted_perms = tracker_bizobj.GetGrantedPerms(", "api_pb2_v1 from proto import project_pb2 from proto import tracker_pb2 from", "value list to FieldValue PB, or labels.\"\"\" fv_list_add = []", "issue = services.issue.GetIssueByLocalID( mar.cnxn, project_id, ir.issueId) result.append(issue.issue_id) except issue_svc.NoSuchIssueException: logging.warning(", "for name in component_names] result = [] for cd in", "issue, granted_perms=granted_perms), canEdit=permissions.CanEditIssue( mar.auth.effective_ids, mar.perms, issue_project, 
issue, granted_perms=granted_perms), fieldValues=field_values_list) if", "= [] label_list_add = [] label_list_remove = [] field_name_dict =", "in config.component_defs: cpath = cd.path if cpath.lower() in component_names_lower: result.append(cd.component_id)", "projects/issues. def issue_global_ids(project_local_id_pairs, project_id, mar, services): \"\"\"Find global issues ids", "= _get_user_email( services.user, mar.cnxn, user_id) result.cc.append('-%s' % user_email) elif amendment.field", "\"\"\"Convert Monorail IssueComment PB to API IssueCommentWrapper.\"\"\" can_delete = permissions.CanDelete(", "uid in cd.admin_ids]), cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]), deprecated=cd.deprecated) if", "services.user, mar.cnxn, user_id) result.cc.append(user_email) for user_id in amendment.removed_user_ids: user_email =", "rights reserved. # Use of this source code is govered", "field %d of project %s does not exist', fv.field_id, issue_project.project_name)", "fv_list_add = [] fv_list_remove = [] fv_list_clear = [] label_list_add", "does not exist', fv.fieldName) continue if fv.operator == api_pb2_v1.FieldValueOperator.clear: fv_list_clear.append(field_def.field_id)", "label_list_add = [] label_list_remove = [] field_name_dict = { fd.field_name:", "for cd in config.component_defs: cpath = cd.path if cpath.lower() in", "= api_pb2_v1.Component( componentId=cd.component_id, projectName=project_name, componentPath=cd.path, description=cd.docstring, admin=sorted([user_names_dict[uid] for uid in", "amendment.newvalue, issue.project_name) elif amendment.field == tracker_pb2.FieldID.BLOCKING: result.blocking = _append_project( amendment.newvalue,", "does not exist', fv.field_id, issue_project.project_name) continue val = None if", "trap_exception=True), id=comment.sequence, published=datetime.datetime.fromtimestamp(comment.timestamp), updates=convert_amendments(issue, comment.amendments, mar, services), kind='monorail#issueComment') def convert_attachment(attachment):", "(framework_helpers.GetHostPort(), user_id), last_visit_days_ago=days_ago, email_bouncing=bool(user.email_bounce_timestamp), vacation_message=user.vacation_message) def convert_issue_ids(issue_ids, mar, services): \"\"\"Convert", "= [] for cd in config.component_defs: cid = cd.component_id if", "convert_comment(issue, comment, mar, services, granted_perms): \"\"\"Convert Monorail IssueComment PB to", "to fetch projects/issues. 
def issue_global_ids(project_local_id_pairs, project_id, mar, services): \"\"\"Find global", "= amendment.newvalue elif amendment.field == tracker_pb2.FieldID.STATUS: result.status = amendment.newvalue elif", "PB.\"\"\" project_name = services.project.LookupProjectNames( mar.cnxn, [cd.project_id])[cd.project_id] user_ids = set() user_ids.update(", "list of component names to ids.\"\"\" component_names_lower = [name.lower() for", "result.components = amendment.newvalue.split() elif amendment.field == tracker_pb2.FieldID.CUSTOM: fv = api_pb2_v1.FieldValue()", "cd in config.component_defs: cpath = cd.path if cpath.lower() in component_names_lower:", "fieldName=field_name, fieldValue=val, derived=fv.derived) field_values_list.append(new_fv) resp = cls( kind='monorail#issue', id=issue.local_id, title=issue.summary,", "ir.projectId: project = services.project.GetProjectByName( mar.cnxn, ir.projectId) if project: project_id =", "else: result.append('%s:%s' % (project_name, id_str)) return result def split_remove_add(item_list): \"\"\"Split", "issue.component_modified_timestamp) return resp def convert_comment(issue, comment, mar, services, granted_perms): \"\"\"Convert", "= user_names_dict[cd.modifier_id] return component def convert_component_ids(config, component_names): \"\"\"Convert a list", "return api_pb2_v1.UserGroupSettingsWrapper( groupName=group_name, who_can_view_members=setting.who_can_view_members, ext_group_type=setting.ext_group_type, last_sync_time=setting.last_sync_time) def convert_component_def(cd, mar, services):", "(ir.projectId, ir.issueId)) return result else: return None def convert_issue(cls, issue,", "defaultPromptForNonMembers=config.default_template_for_users) def convert_status(status): \"\"\"Convert Monorail StatusDef PB to API Status", "'-' means this issue is being removed elif id_str.startswith('-'): result.append('-%s:%s'", "meansOpen=status.means_open, description=status.status_docstring) def convert_label(label): \"\"\"Convert Monorail LabelDef PB to API", "[] field_id_dict = { fd.field_id: fd.field_name for fd in config.field_defs}", "def convert_field_values(field_values, mar, services): \"\"\"Convert user passed in field value", "exist', fv.fieldName) continue if fv.operator == api_pb2_v1.FieldValueOperator.clear: fv_list_clear.append(field_def.field_id) continue #", "All rights reserved. # Use of this source code is", "in config.well_known_statuses], labels=[convert_label(l) for l in config.well_known_labels], prompts=[convert_template(t) for t", "at # https://developers.google.com/open-source/licenses/bsd \"\"\"Convert Monorail PB objects to API PB", "% (ir.projectId, ir.issueId)) return result else: return None def convert_issue(cls,", "== tracker_pb2.FieldID.LABELS: result.labels = amendment.newvalue.split() elif amendment.field == tracker_pb2.FieldID.CC: for", "ids are filtered out. 
issues = services.issue.GetIssues(mar.cnxn, issue_ids) result =", "continue val = None if fv.user_id: val = _get_user_email( services.user,", "BSD-style # license that can be found in the LICENSE", "return api_pb2_v1.IssueCommentWrapper( attachments=[convert_attachment(a) for a in comment.attachments], author=convert_person(comment.user_id, mar.cnxn, services,", "elif amendment.field == tracker_pb2.FieldID.STATUS: result.status = amendment.newvalue elif amendment.field ==", "= None local_id = None if ':' in pair: pair_ary", "project_name = services.project.LookupProjectNames( mar.cnxn, [cd.project_id])[cd.project_id] user_ids = set() user_ids.update( cd.admin_ids", "tracker_pb2.FieldID.COMPONENTS: result.components = amendment.newvalue.split() elif amendment.field == tracker_pb2.FieldID.CUSTOM: fv =", "\"\"\"Get user email.\"\"\" try: user_email = user_service.LookupUserEmail( cnxn, user_id) if", "services): \"\"\"Convert API IssueRef PBs to global issue ids.\"\"\" if", "logging.warning(str(ex)) return None else: raise ex days_ago = None if", "amendment.added_user_ids[0]) elif amendment.field == tracker_pb2.FieldID.LABELS: result.labels = amendment.newvalue.split() elif amendment.field", "[cd.modifier_id]) user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids)) component = api_pb2_v1.Component( componentId=cd.component_id, projectName=project_name,", "elif amendment.field == tracker_pb2.FieldID.LABELS: result.labels = amendment.newvalue.split() elif amendment.field ==", "user_ids = set() user_ids.update( cd.admin_ids + cd.cc_ids + [cd.creator_id] +", "% project_name) issue_project_id = project.project_id else: issue_project_id = project_id local_id", "framework import framework_helpers from framework import permissions from framework import", "int(pair_ary[1]) project = services.project.GetProjectByName(mar.cnxn, project_name) if not project: raise project_svc.NoSuchProjectException(", "Copyright 2016 The Chromium Authors. All rights reserved. 
# Use", "last_visit_days_ago=days_ago, email_bouncing=bool(user.email_bounce_timestamp), vacation_message=user.vacation_message) def convert_issue_ids(issue_ids, mar, services): \"\"\"Convert global issue", "elif fv.str_value: val = fv.str_value elif fv.int_value: val = str(fv.int_value)", "issue_project, issue, granted_perms=granted_perms), fieldValues=field_values_list) if issue.closed_timestamp > 0: resp.closed =", "= services.project.GetProject(mar.cnxn, issue.project_id) component_list = [] for cd in config.component_defs:", "api_pb2_v1.Attachment( attachmentId=attachment.attachment_id, fileName=attachment.filename, fileSize=attachment.filesize, mimetype=attachment.mimetype, isDeleted=attachment.deleted) def convert_amendments(issue, amendments, mar,", "== tracker_pb2.FieldID.SUMMARY: result.summary = amendment.newvalue elif amendment.field == tracker_pb2.FieldID.STATUS: result.status", "= cls( kind='monorail#issue', id=issue.local_id, title=issue.summary, summary=issue.summary, projectId=issue_project.project_name, stars=issue.star_count, starred=services.issue_star.IsItemStarredBy( mar.cnxn,", "Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB.\"\"\" return api_pb2_v1.ProjectIssueConfig( kind='monorail#projectIssueConfig',", "amendment.newvalue elif amendment.field == tracker_pb2.FieldID.OWNER: if len(amendment.added_user_ids) == 0: result.owner", "issue.owner_modified_timestamp: resp.owner_modified = datetime.datetime.fromtimestamp( issue.owner_modified_timestamp) if issue.status_modified_timestamp: resp.status_modified = datetime.datetime.fromtimestamp(", "field_values_list = [] field_id_dict = { fd.field_id: fd.field_name for fd", "if not project: raise project_svc.NoSuchProjectException( 'Project %s does not exist'", "tracker_pb2.FieldTypes.USER_TYPE: try: new_fv.user_id = services.user.LookupUserID(mar.cnxn, fv.fieldValue) except user_svc.NoSuchUserException: new_fv.user_id =", "SQL queries to fetch projects/issues. def issue_global_ids(project_local_id_pairs, project_id, mar, services):", "of does not exist', fv.fieldName) continue if fv.operator == api_pb2_v1.FieldValueOperator.clear:", "import tracker_bizobj from tracker import tracker_helpers def convert_project(project, config, role):", "if ir.projectId: project = services.project.GetProjectByName( mar.cnxn, ir.projectId) if project: project_id", "vacation_message=user.vacation_message) def convert_issue_ids(issue_ids, mar, services): \"\"\"Convert global issue ids to", "user id to API AtomPerson PB.\"\"\" if not user_id: return", "user_email = framework_constants.DELETED_USER_NAME except user_svc.NoSuchUserException: user_email = framework_constants.DELETED_USER_NAME return user_email", "if item.startswith('-'): list_to_remove.append(item[1:]) else: list_to_add.append(item) return list_to_add, list_to_remove # TODO(sheyang):", "to <project>:<id> format.\"\"\" result = [] id_list = issue_ids.split() for", "global issue ids.\"\"\" if issueref_pbs: result = [] for ir", "return result def _get_user_email(user_service, cnxn, user_id): \"\"\"Get user email.\"\"\" try:", "PB.\"\"\" # missed issue ids are filtered out. 
issues =", "Use of this source code is govered by a BSD-style", "_get_user_email( services.user, mar.cnxn, user_id) result.cc.append(user_email) for user_id in amendment.removed_user_ids: user_email", "= [name.lower() for name in component_names] result = [] for", "trap_exception=True), canDelete=can_delete, content=comment.content, deletedBy=convert_person(comment.deleted_by, mar.cnxn, services, trap_exception=True), id=comment.sequence, published=datetime.datetime.fromtimestamp(comment.timestamp), updates=convert_amendments(issue,", "htmlLink='/p/%s/' % project.project_name, summary=project.summary, description=project.description, role=role, issuesConfig=convert_project_config(config)) def convert_project_config(config): \"\"\"Convert", "if amendment.field == tracker_pb2.FieldID.SUMMARY: result.summary = amendment.newvalue elif amendment.field ==", "cd.path if cpath.lower() in component_names_lower: result.append(cd.component_id) return result def convert_field_values(field_values,", "PB objects\"\"\" import datetime import logging import time from framework", "= services.issue.GetIssues(mar.cnxn, issue_ids) result = [] for issue in issues:", "PB.\"\"\" return api_pb2_v1.Prompt( name=template.name, title=template.summary, description=template.content, titleMustBeEdited=template.summary_must_be_edited, status=template.status, labels=template.labels, membersOnly=template.members_only,", "mar.cnxn, fv.user_id) elif fv.str_value: val = fv.str_value elif fv.int_value: val", "proto import project_pb2 from proto import tracker_pb2 from services import", "== tracker_pb2.FieldID.CUSTOM: fv = api_pb2_v1.FieldValue() fv.fieldName = amendment.custom_field_name fv.fieldValue =", "tracker_bizobj from tracker import tracker_helpers def convert_project(project, config, role): \"\"\"Convert", "= mar.project_id if ir.projectId: project = services.project.GetProjectByName( mar.cnxn, ir.projectId) if", "= _get_user_email( services.user, mar.cnxn, user_id) result.cc.append(user_email) for user_id in amendment.removed_user_ids:", "last_sync_time=setting.last_sync_time) def convert_component_def(cd, mar, services): \"\"\"Convert ComponentDef PB to Component", "= api_pb2_v1.FieldValue() fv.fieldName = amendment.custom_field_name fv.fieldValue = amendment.newvalue result.fieldValues.append(fv) return", "mar, services): \"\"\"Convert ComponentDef PB to Component PB.\"\"\" project_name =", "None local_id = None if ':' in pair: pair_ary =", "field_values: field_def = field_name_dict.get(fv.fieldName) if not field_def: logging.warning('Custom field %s", "= datetime.datetime.fromtimestamp(issue.closed_timestamp) if issue.merged_into: resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0] if issue.owner_modified_timestamp: resp.owner_modified", "tracker_pb2.FieldTypes.ENUM_TYPE: raw_val = '%s-%s' % (fv.fieldName, fv.fieldValue) if fv.operator ==", "ex: if trap_exception: logging.warning(str(ex)) return None else: raise ex days_ago", "= cd.component_id if cid in issue.component_ids: component_list.append(cd.path) cc_list = [convert_person(p,", "component_names): \"\"\"Convert a list of component names to ids.\"\"\" component_names_lower", "issue ids to API IssueRef PB.\"\"\" # missed issue ids", "LICENSE file or at # https://developers.google.com/open-source/licenses/bsd \"\"\"Convert Monorail PB objects", "result.owner = framework_constants.NO_USER_NAME else: result.owner = _get_user_email( services.user, mar.cnxn, amendment.added_user_ids[0])", "objects\"\"\" import datetime import logging import 
time from framework import", "result.append('-%s:%s' % (project_name, id_str[1:])) else: result.append('%s:%s' % (project_name, id_str)) return", "format.\"\"\" result = [] id_list = issue_ids.split() for id_str in", "in config.field_defs} for fv in issue.field_values: field_name = field_id_dict.get(fv.field_id) if", "= { fd.field_name: fd for fd in mar.config.field_defs} for fv", "== api_pb2_v1.FieldValueOperator.add: label_list_add.append(raw_val) else: logging.warning('Unsupported field value operater %s', fv.operator)", "label=label.label, description=label.label_docstring) def convert_template(template): \"\"\"Convert Monorail TemplateDef PB to API", "for issue in issues: issue_ref = api_pb2_v1.IssueRef( issueId=issue.local_id, projectId=issue.project_name, kind='monorail#issueRef')", "mar.perms, comment.deleted_by, comment.user_id, mar.project, permissions.GetRestrictions(issue), granted_perms=granted_perms) return api_pb2_v1.IssueCommentWrapper( attachments=[convert_attachment(a) for", "pair in project_local_id_pairs: issue_project_id = None local_id = None if", "Chromium Authors. All rights reserved. # Use of this source", "result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate') for amendment in amendments: if amendment.field ==", "= None if user.last_visit_timestamp: secs_ago = int(time.time()) - user.last_visit_timestamp days_ago", "/ framework_constants.SECS_PER_DAY return api_pb2_v1.AtomPerson( kind='monorail#issuePerson', name=user.email, htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id),", "resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp) if issue.merged_into: resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0] if issue.owner_modified_timestamp:", "= permissions.CanDelete( mar.auth.user_id, mar.auth.effective_ids, mar.perms, comment.deleted_by, comment.user_id, mar.project, permissions.GetRestrictions(issue), granted_perms=granted_perms)", "fd.field_name for fd in config.field_defs} for fv in issue.field_values: field_name", "amendment in amendments: if amendment.field == tracker_pb2.FieldID.SUMMARY: result.summary = amendment.newvalue", "tracker_pb2.FieldID.STATUS: result.status = amendment.newvalue elif amendment.field == tracker_pb2.FieldID.OWNER: if len(amendment.added_user_ids)", "= tracker_pb2.FieldValue( field_id=field_def.field_id) if field_def.field_type == tracker_pb2.FieldTypes.USER_TYPE: try: new_fv.user_id =", "project %s does not exist', fv.field_id, issue_project.project_name) continue val =", "found in the LICENSE file or at # https://developers.google.com/open-source/licenses/bsd \"\"\"Convert", "issue_project.project_name) continue val = None if fv.user_id: val = _get_user_email(", "PB to API IssuesGetInsertResponse.\"\"\" config = services.config.GetProjectConfig(mar.cnxn, issue.project_id) granted_perms =", "code is govered by a BSD-style # license that can", "field_name_dict = { fd.field_name: fd for fd in mar.config.field_defs} for", "return list_to_add, list_to_remove # TODO(sheyang): batch the SQL queries to", "not project: raise project_svc.NoSuchProjectException( 'Project %s does not exist' %", "list to FieldValue PB, or labels.\"\"\" fv_list_add = [] fv_list_remove", "from framework import framework_constants from framework import framework_helpers from framework", "user = services.user.GetUser(cnxn, user_id) except user_svc.NoSuchUserException as ex: if trap_exception:", "def convert_amendments(issue, amendments, mar, services): \"\"\"Convert a list of Monorail", 
"tracker_pb2.FieldID.OWNER: if len(amendment.added_user_ids) == 0: result.owner = framework_constants.NO_USER_NAME else: result.owner", "== tracker_pb2.FieldID.STATUS: result.status = amendment.newvalue elif amendment.field == tracker_pb2.FieldID.OWNER: if", "int(time.time()) - user.last_visit_timestamp days_ago = secs_ago / framework_constants.SECS_PER_DAY return api_pb2_v1.AtomPerson(", "or labels.\"\"\" fv_list_add = [] fv_list_remove = [] fv_list_clear =", "= framework_constants.DELETED_USER_NAME except user_svc.NoSuchUserException: user_email = framework_constants.DELETED_USER_NAME return user_email def", "in item_list: if item.startswith('-'): list_to_remove.append(item[1:]) else: list_to_add.append(item) return list_to_add, list_to_remove", "mar.perms, issue_project, issue, granted_perms=granted_perms), canEdit=permissions.CanEditIssue( mar.auth.effective_ids, mar.perms, issue_project, issue, granted_perms=granted_perms),", "fv.fieldValue elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE: new_fv.int_value = int(fv.fieldValue) else: logging.warning(", "None if fv.user_id: val = _get_user_email( services.user, mar.cnxn, fv.user_id) elif", "https://developers.google.com/open-source/licenses/bsd \"\"\"Convert Monorail PB objects to API PB objects\"\"\" import", "amendment.added_user_ids: user_email = _get_user_email( services.user, mar.cnxn, user_id) result.cc.append(user_email) for user_id", "prompts=[convert_template(t) for t in config.templates], defaultPromptForMembers=config.default_template_for_developers, defaultPromptForNonMembers=config.default_template_for_users) def convert_status(status): \"\"\"Convert", "import tracker_pb2 from services import issue_svc from services import project_svc", "user email.\"\"\" try: user_email = user_service.LookupUserEmail( cnxn, user_id) if not", "days_ago = None if user.last_visit_timestamp: secs_ago = int(time.time()) - user.last_visit_timestamp", "import logging import time from framework import framework_constants from framework", "from services import user_svc from tracker import tracker_bizobj from tracker", "import tracker_helpers def convert_project(project, config, role): \"\"\"Convert Monorail Project PB", "comment, mar, services, granted_perms): \"\"\"Convert Monorail IssueComment PB to API", "amendment.field == tracker_pb2.FieldID.MERGEDINTO: result.mergedInto = amendment.newvalue elif amendment.field == tracker_pb2.FieldID.COMPONENTS:", "for s in config.well_known_statuses], labels=[convert_label(l) for l in config.well_known_labels], prompts=[convert_template(t)", "issue.field_values: field_name = field_id_dict.get(fv.field_id) if not field_name: logging.warning('Custom field %d", "if cd.created: component.created = datetime.datetime.fromtimestamp(cd.created) component.creator = user_names_dict[cd.creator_id] if cd.modified:", "PB to API ProjectIssueConfig PB.\"\"\" return api_pb2_v1.ProjectIssueConfig( kind='monorail#projectIssueConfig', restrictToKnown=config.restrict_to_known, defaultColumns=config.default_col_spec.split(),", "+ [cd.creator_id] + [cd.modifier_id]) user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids)) component =", "that can be found in the LICENSE file or at", "of project %s does not exist', fv.field_id, issue_project.project_name) continue val", "services), kind='monorail#issueComment') def convert_attachment(attachment): \"\"\"Convert Monorail Attachment PB to API", "Monorail Amendment PBs to API Update.\"\"\" result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate') 
for", "to API ProjectIssueConfig PB.\"\"\" return api_pb2_v1.ProjectIssueConfig( kind='monorail#projectIssueConfig', restrictToKnown=config.restrict_to_known, defaultColumns=config.default_col_spec.split(), defaultSorting=config.default_sort_spec.split(),", "comment.deleted_by, comment.user_id, mar.project, permissions.GetRestrictions(issue), granted_perms=granted_perms) return api_pb2_v1.IssueCommentWrapper( attachments=[convert_attachment(a) for a", "titleMustBeEdited=template.summary_must_be_edited, status=template.status, labels=template.labels, membersOnly=template.members_only, defaultToMember=template.owner_defaults_to_member, componentRequired=template.component_required) def convert_person(user_id, cnxn, services,", "elif id_str.startswith('-'): result.append('-%s:%s' % (project_name, id_str[1:])) else: result.append('%s:%s' % (project_name,", "== api_pb2_v1.FieldValueOperator.clear: fv_list_clear.append(field_def.field_id) continue # Enum fields are stored as", "api_pb2_v1.Label( label=label.label, description=label.label_docstring) def convert_template(template): \"\"\"Convert Monorail TemplateDef PB to", "projectName=project_name, componentPath=cd.path, description=cd.docstring, admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]), cc=sorted([user_names_dict[uid] for", "field_name = field_id_dict.get(fv.field_id) if not field_name: logging.warning('Custom field %d of", "component.creator = user_names_dict[cd.creator_id] if cd.modified: component.modified = datetime.datetime.fromtimestamp(cd.modified) component.modifier =", "datetime.datetime.fromtimestamp(issue.closed_timestamp) if issue.merged_into: resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0] if issue.owner_modified_timestamp: resp.owner_modified =", "mar, services): \"\"\"Convert API IssueRef PBs to global issue ids.\"\"\"", "issue_project_id = project_id local_id = int(pair) result.append( services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id))", "component_names] result = [] for cd in config.component_defs: cpath =", "val = None if fv.user_id: val = _get_user_email( services.user, mar.cnxn,", "tracker_pb2.FieldID.BLOCKEDON: result.blockedOn = _append_project( amendment.newvalue, issue.project_name) elif amendment.field == tracker_pb2.FieldID.BLOCKING:", "passed in field value list to FieldValue PB, or labels.\"\"\"", "fv.fieldName) continue if fv.operator == api_pb2_v1.FieldValueOperator.clear: fv_list_clear.append(field_def.field_id) continue # Enum", "resp.component_modified = datetime.datetime.fromtimestamp( issue.component_modified_timestamp) return resp def convert_comment(issue, comment, mar,", "from services import issue_svc from services import project_svc from services", "import user_svc from tracker import tracker_bizobj from tracker import tracker_helpers", "if user.last_visit_timestamp: secs_ago = int(time.time()) - user.last_visit_timestamp days_ago = secs_ago", "services.config.GetProjectConfig(mar.cnxn, issue.project_id) granted_perms = tracker_bizobj.GetGrantedPerms( issue, mar.auth.effective_ids, config) issue_project =", "component_names_lower = [name.lower() for name in component_names] result = []", "ir.issueId) result.append(issue.issue_id) except issue_svc.NoSuchIssueException: logging.warning( 'Issue (%s:%d) does not exist.'", "elif amendment.field == tracker_pb2.FieldID.BLOCKEDON: result.blockedOn = _append_project( amendment.newvalue, issue.project_name) elif", "issue_svc.NoSuchIssueException: logging.warning( 'Issue (%s:%d) does not exist.' 
% (ir.projectId, ir.issueId))", "to API IssueRef PB.\"\"\" # missed issue ids are filtered", "cc_list = [convert_person(p, mar.cnxn, services) for p in issue.cc_ids] cc_list", "[] for cd in config.component_defs: cpath = cd.path if cpath.lower()", "fv.operator == api_pb2_v1.FieldValueOperator.add: fv_list_add.append(new_fv) else: logging.warning('Unsupported field value operater %s',", "in issues: issue_ref = api_pb2_v1.IssueRef( issueId=issue.local_id, projectId=issue.project_name, kind='monorail#issueRef') result.append(issue_ref) return", "are filtered out. issues = services.issue.GetIssues(mar.cnxn, issue_ids) result = []", "reserved. # Use of this source code is govered by" ]
[ "{source_description} def get_subject(self): return self.triple[0] def get_object(self): return self.triple[2] def", "self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation() predictions = list(map(self._infer_single, descriptions_list)) per_entity_predictions = self.consolidate(predictions) per_entity_predictions", "inferred facts in case of functional predicates :param per_entity_prediction: :return:", "\"\"\" def quality_method(p): return p.get_quality(self.quality, self.quality_aggregation) per_entity_prediction_filtered = defaultdict(list) for", "topk=topk, with_weight=True, with_description=False, quality=self.quality) if target_entities and clear_target_entities: self.labels_indexer.drop() return", "= out_filepath + '.parsable' out_filepath_with_type = out_filepath + ('.%s' %", "Combine predictions from different rules :param predictions: list of generated", "return out_filepath_with_type if __name__ == '__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',", "+ str(self.get_main_description()) def __repr__(self): return \"%s\\t(\\t%s,%s)\" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources))", "from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended", "excut.clustering import target_entities as tes class Prediction: \"\"\" An object", "def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None, clear_target_entities=True): \"\"\" Infer", "\"\"\" Infer new facts for a giving set of descriptions", "pass class SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\" Deduction engine that converts the rules", "quality_method(p): return p.get_quality(self.quality, self.quality_aggregation) per_entity_prediction_filtered = defaultdict(list) for sub, per_obj_predictions", "per_var_predictions.items(): if topk > 0: predictions = predictions[:topk] for p", "import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended from excut.kg.kg_indexing import Indexer from excut.kg.utils.data_formating import", "self.query_executer = kg_query_interface self.quality = quality self.quality_aggregation = quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type,", "ExplanationStructure from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended from excut.kg.kg_indexing import Indexer", "import n3_repr from excut.utils.logging import logger from excut.kg.utils.Constants import DEFUALT_AUX_RELATION", "min_quality=0, topk=-1, output_filepath=None, clear_target_entities=True): \"\"\" Infer new facts for a", "source_description=Description(), all_sources=None): def __init__(self, triple=None, sources=None): self.triple = triple #", "predictions (optional) by default the exclusive coverage of the rules", "the predictions :param topk: k *distinct* highest quality predictions per", "assignments \"\"\" if isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if target_entities: self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation() predictions", "the other with .parsable extension that can be parsed in", "out_file_parsable = 
out_filepath + '.parsable' out_filepath_with_type = out_filepath + ('.%s'", "out_str = str(p) out_file.write(out_str) out_file.write('\\n') with open(out_file_parsable + ('.%s' %", ":param triple_format: :param topk: :param with_weight: :param with_description: :return: \"\"\"", "same triple \"\"\" # def __init__(self, triple: tuple, source_description=Description(), all_sources=None):", ":param out_filepath: :param triple_format: :param topk: :param with_weight: :param with_description:", "class Prediction: \"\"\" An object to represent the prediction of", "def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max): \"\"\" :param kg_query_interface:", "# I only output normalized_coverage out_str = n3_repr(p.triple) + ('\\t%f'", "takes care of consolidating similar predictions \"\"\" def __init__(self, kg_query_interface:", "key=lambda d: d.get_quality(measure)) def __str__(self): return str(self.triple) + '<<' +", "min_quality: minimum aggregated quality for the predictions :param topk: k", "target_entities: self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation() predictions = list(map(self._infer_single, descriptions_list)) per_entity_predictions = self.consolidate(predictions)", "import target_entities as tes class Prediction: \"\"\" An object to", "explantions/descriptions rules :param target_entities: entities and their labels for which", "in python. :param per_var_predictions: :param out_filepath: :param triple_format: :param topk:", "out_filepath + '.parsable' out_filepath_with_type = out_filepath + ('.%s' % quality", "if len(quality) > 0 else ''), 'w') as out_file: out_file.write('\\n'.join(", "for ranking the predictions (optional) by default the exclusive coverage", "from excut.clustering import target_entities as tes class Prediction: \"\"\" An", "self.all_sources]) def get_main_description(self, measure='x_coverage', method=max): return method(self.all_sources, key=lambda d: d.get_quality(measure))", "__init__(self, triple: tuple, source_description=Description(), all_sources=None): def __init__(self, triple=None, sources=None): self.triple", "reverse=True) include = topk if topk > 0 else len(merged_predictions)", "objective quality measure for ranking the predictions (optional) by default", ":return: \"\"\" def quality_method(p): return p.get_quality(self.quality, self.quality_aggregation) per_entity_prediction_filtered = defaultdict(list)", "predictions): \"\"\" Combine predictions from different rules :param predictions: list", "# per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions = defaultdict(lambda: defaultdict(Prediction)) for p in list(chain.from_iterable(predictions)):", "% (self.__class__.__name__, repr(self.triple), repr(self.all_sources)) def __eq__(self, other): return other.triple ==", "list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method, reverse=True) include = topk if topk > 0", "from different rules :param predictions: list of generated predictions :return:", "excut.explanations_mining.descriptions import dump_explanations_to_file from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file from", "for aggregating the score if multiple rules infers the same", "a giving set of descriptions :param descriptions_list: list of explantions/descriptions", "the prediction of the rules :ivar triple: the predicted triple", "for var, predictions in 
per_var_predictions.items(): if topk > 0: predictions", "consolidating similar predictions \"\"\" def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage',", "facts for the given Description :param description: :return: \"\"\" bindings", "Deduction engine that converts the rules to sparql and fire", "interface for the KG. :param relation: the relation used in", "the first is human readable and the other with .parsable", "'') + ( '\\t%s' % p.source_description if with_description else '')", "endpoint=kg_query_interface.endpoint, graph= kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier) def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1,", "per_entity_prediction.items(): # print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])", "per_entity_prediction_filtered[sub] = merged_predictions[:include] return per_entity_prediction_filtered def _infer_single(self, description: Description2): \"\"\"", "self.triple def __hash__(self): return hash(self.triple) class DeductionEngine(): \"\"\" Abstract rulebased_deduction/inference", "\"%s\\t(\\t%s,%s)\" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources)) def __eq__(self, other): return other.triple", "graph= kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier) def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None,", "[Prediction((b, head.predicate, head.object), [description]) for b in bindings] return predictions", "as out_file: out_file.write('\\n'.join( map(str, chain.from_iterable(map(lambda l: l[:topk] if topk >", ":param per_var_predictions: :param out_filepath: :param triple_format: :param topk: :param with_weight:", "__name__ == '__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org', 'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels')", "the relation used in the predicted triple (optional) :param quality:", "inference (rulebased_deduction engine) \"\"\" import itertools from collections import defaultdict", "the same fact (optional) by default max is used. 
\"\"\"", "b in bindings] return predictions def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1,", "qaulity_method(p)) for k, p in per_obj_predictions.items()]) merged_predictions = list( filter(lambda", "different rules :param predictions: list of generated predictions :return: combined", "used in the predicted triple (optional) :param quality: objective quality", ":param target_entities: entities and their labels for which predictions are", "of explantions/descriptions rules :param target_entities: entities and their labels for", "Atom, load_from_file from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended,", "self.relation=target_entities.get_relation() predictions = list(map(self._infer_single, descriptions_list)) per_entity_predictions = self.consolidate(predictions) per_entity_predictions =", "return p.get_quality(self.quality, self.quality_aggregation) per_entity_prediction_filtered = defaultdict(list) for sub, per_obj_predictions in", "0: predictions = predictions[:topk] for p in predictions: if triple_format:", "rules :param target_entities: entities and their labels for which predictions", "list( filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method, reverse=True) include", "= EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org', 'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})", "predictions = predictions[:topk] for p in predictions: if triple_format: #", "__init__(self, triple=None, sources=None): self.triple = triple # self.source_description = source_descriptionf", "are generated :param min_quality: minimum aggregated quality for the predictions", "load_from_file from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure", "output_filepath: dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True, with_description=False, quality=self.quality) if target_entities", "and clear_target_entities: self.labels_indexer.drop() return per_entity_predictions def consolidate(self, predictions): \"\"\" Combine", "An object to represent the prediction of the rules :ivar", "quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint, graph= kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier) def infer(self, descriptions_list, target_entities=None,", "file. 
:param clear_target_entities: clear indexed target entities after done inference", "from excut.kg.kg_indexing import Indexer from excut.kg.utils.data_formating import n3_repr from excut.utils.logging", "'') else: out_str = str(p) out_file.write(out_str) out_file.write('\\n') with open(out_file_parsable +", "\"\"\" def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max): \"\"\" :param", "output normalized_coverage out_str = n3_repr(p.triple) + ('\\t%f' % p.get_quality(quality) if", "entities and their labels for which predictions are generated :param", "fire them over the KG. The rule-based_deduction takes care of", "engine) \"\"\" import itertools from collections import defaultdict from itertools", "\"\"\" This module contains the rule-based inference (rulebased_deduction engine) \"\"\"", "clear_target_entities=True): \"\"\" Infer new facts for a giving set of", "predicted entity-clusters assignments \"\"\" if isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if target_entities: self.labels_indexer.index_triples(target_entities)", "clear_target_entities: clear indexed target entities after done inference :return: dictionary", "def _infer_single(self, description: Description2): \"\"\" Infer new facts for the", ":return: combined single prediction with several sources for equivalent predictions", "out_file.write(out_str) out_file.write('\\n') with open(out_file_parsable + ('.%s' % quality if len(quality)", "triple=None, sources=None): self.triple = triple # self.source_description = source_descriptionf self.all_sources", "bindings] return predictions def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True, with_description=False,", "descriptions_list: list of explantions/descriptions rules :param target_entities: entities and their", "return per_entity_predictions def consolidate(self, predictions): \"\"\" Combine predictions from different", "''), 'w') as out_file: out_file.write('\\n'.join( map(str, chain.from_iterable(map(lambda l: l[:topk] if", "\"\"\" super(SparqlBasedDeductionEngineExtended, self).__init__() self.relation = relation self.query_executer = kg_query_interface self.quality", "EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended from excut.kg.kg_indexing import Indexer from excut.kg.utils.data_formating import n3_repr", "return self.triple[0] def get_object(self): return self.triple[2] def get_quality(self, measure='x_coverage', method=max):", "predictions def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True, with_description=False, quality='x_coverage'): \"\"\"", "\"\"\" out_file_parsable = out_filepath + '.parsable' out_filepath_with_type = out_filepath +", "head.predicate, head.object), [description]) for b in bindings] return predictions def", "method=max): # return self.source_description.get_quality(measure) return method([source.get_quality(measure) for source in self.all_sources])", ":ivar triple: the predicted triple :ivar all_sources: all rules that", "indexed target entities after done inference :return: dictionary of predicted", "len(quality) > 0 else '') with open(out_filepath_with_type, 'w') as out_file:", "per_entity_prediction_filtered def _infer_single(self, description: Description2): \"\"\" Infer new facts for", "> 0 else '') with open(out_filepath_with_type, 'w') as out_file: for", "infers the 
same fact (optional) by default max is used.", "topk=-1, output_filepath=None, clear_target_entities=True): \"\"\" Infer new facts for a giving", ":param quality_aggregation: the methd used for aggregating the score if", "0 else '') with open(out_filepath_with_type, 'w') as out_file: for var,", "if target_entities and clear_target_entities: self.labels_indexer.drop() return per_entity_predictions def consolidate(self, predictions):", "predictions[:topk] for p in predictions: if triple_format: # I only", "for b in bindings] return predictions def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True,", "get_main_description(self, measure='x_coverage', method=max): return method(self.all_sources, key=lambda d: d.get_quality(measure)) def __str__(self):", "repr(self.triple), repr(self.all_sources)) def __eq__(self, other): return other.triple == self.triple def", "engine. \"\"\" def __init__(self, **kwargs): pass def infer(self, descriptions, recursive=False,", "repr(self.all_sources)) def __eq__(self, other): return other.triple == self.triple def __hash__(self):", "score if multiple rules infers the same fact (optional) by", "'.parsable' out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality)", "explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE}) explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded = SparqlBasedDeductionEngineExtended(vos_executer)", "entity, :param output_filepath: predictions output file. :param clear_target_entities: clear indexed", "measure='x_coverage', method=max): # return self.source_description.get_quality(measure) return method([source.get_quality(measure) for source in", "sub, per_obj_predictions in per_entity_prediction.items(): # print([(k, p.triple[2], qaulity_method(p)) for k,", "descriptions :param descriptions_list: list of explantions/descriptions rules :param target_entities: entities", "Infer new facts for a giving set of descriptions :param", "self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint, graph= kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier) def infer(self, descriptions_list, target_entities=None, min_quality=0,", "out_filepath: :param triple_format: :param topk: :param with_weight: :param with_description: :return:", "var, predictions in per_var_predictions.items(): if topk > 0: predictions =", "rules that predicted the same triple \"\"\" # def __init__(self,", "chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))", "collections import defaultdict from itertools import chain from excut.explanations_mining.descriptions import", "predictions output file. 
:param clear_target_entities: clear indexed target entities after", "topk if topk > 0 else len(merged_predictions) per_entity_prediction_filtered[sub] = merged_predictions[:include]", "_infer_single(self, description: Description2): \"\"\" Infer new facts for the given", "'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE}) explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded", "the same triple \"\"\" # def __init__(self, triple: tuple, source_description=Description(),", "= merged_predictions[:include] return per_entity_prediction_filtered def _infer_single(self, description: Description2): \"\"\" Infer", "prediction of the rules :ivar triple: the predicted triple :ivar", "(optional) :param quality: objective quality measure for ranking the predictions", "len(quality) > 0 else ''), 'w') as out_file: out_file.write('\\n'.join( map(str,", "target_entities as tes class Prediction: \"\"\" An object to represent", "to two files, the first is human readable and the", "+= p.all_sources return per_entity_predictions def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1): \"\"\"", "l[:topk] if topk > 0 else l, per_var_predictions.values()))))) return out_filepath_with_type", "in bindings] return predictions def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,", "aggregating the score if multiple rules infers the same fact", ":param min_quality: minimum aggregated quality for the predictions :param topk:", "that predicted the same triple \"\"\" # def __init__(self, triple:", "the exclusive coverage of the rules is used :param quality_aggregation:", "def __repr__(self): return \"%s\\t(\\t%s,%s)\" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources)) def __eq__(self,", "import chain from excut.explanations_mining.descriptions import dump_explanations_to_file from excut.explanations_mining.descriptions_new import Description2,", "the rule-based inference (rulebased_deduction engine) \"\"\" import itertools from collections", "relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max): \"\"\" :param kg_query_interface: interface for the KG.", "import Description2, Atom, load_from_file from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended from excut.explanations_mining.simple_miner.description_miner_extended", "triple_format=True, topk=-1, with_weight=True, with_description=False, quality='x_coverage'): \"\"\" Writes the predictions to", "self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x', self.relation, '?z')])) head = description.head # only supports", "def __init__(self, triple=None, sources=None): self.triple = triple # self.source_description =", "> threshold, list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method, reverse=True) include = topk if topk", "KG. 
The rule-based_deduction takes care of consolidating similar predictions \"\"\"", "of the rules is used :param quality_aggregation: the methd used", "triple # self.source_description = source_descriptionf self.all_sources = sources if sources", "with_weight else '') + ( '\\t%s' % p.source_description if with_description", "kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier) def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None, clear_target_entities=True):", "cons_pred = per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple = p.triple cons_pred.all_sources += p.all_sources return", "dict \"\"\" # per_var_predictions = defaultdict(lambda: defaultdict(list)) # for p", "p.get_quality(self.quality, self.quality_aggregation) per_entity_prediction_filtered = defaultdict(list) for sub, per_obj_predictions in per_entity_prediction.items():", "parsed in python. :param per_var_predictions: :param out_filepath: :param triple_format: :param", "output_filepath: predictions output file. :param clear_target_entities: clear indexed target entities", "return per_entity_predictions def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1): \"\"\" Merge the", "default the exclusive coverage of the rules is used :param", "the given Description :param description: :return: \"\"\" bindings = self.query_executer.get_arguments_bindings(description,", "inference :return: dictionary of predicted entity-clusters assignments \"\"\" if isinstance(descriptions_list,dict):", "return predictions def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True, with_description=False, quality='x_coverage'):", "def __hash__(self): return hash(self.triple) class DeductionEngine(): \"\"\" Abstract rulebased_deduction/inference engine.", "= ded.infer(explans, target_entities, output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv') logger.info(\"Total variables with predictions subjects: %i\",", "out_filepath + ('.%s' % quality if len(quality) > 0 else", "clear indexed target entities after done inference :return: dictionary of", "print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()]) merged_predictions =", ":param per_entity_prediction: :return: \"\"\" def quality_method(p): return p.get_quality(self.quality, self.quality_aggregation) per_entity_prediction_filtered", "and the other with .parsable extension that can be parsed", "predictions: list of generated predictions :return: combined single prediction with", "topk > 0 else len(merged_predictions) per_entity_prediction_filtered[sub] = merged_predictions[:include] return per_entity_prediction_filtered", "rule-based_deduction takes care of consolidating similar predictions \"\"\" def __init__(self,", "per_entity_predictions def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1): \"\"\" Merge the the", "defaultdict(Prediction)) for p in list(chain.from_iterable(predictions)): cons_pred = per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple =", "for which predictions are generated :param min_quality: minimum aggregated quality", "combined single prediction with several sources for equivalent predictions :rtype:", "for equivalent predictions :rtype: dict \"\"\" # per_var_predictions = defaultdict(lambda:", "per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple = p.triple cons_pred.all_sources += p.all_sources return 
per_entity_predictions def", "l: l[:topk] if topk > 0 else l, per_var_predictions.values()))))) return", "with_weight=True, with_description=False, quality=self.quality) if target_entities and clear_target_entities: self.labels_indexer.drop() return per_entity_predictions", "return hash(self.triple) class DeductionEngine(): \"\"\" Abstract rulebased_deduction/inference engine. \"\"\" def", "topk: k *distinct* highest quality predictions per entity, :param output_filepath:", "( '\\t%s' % p.source_description if with_description else '') else: out_str", "return other.triple == self.triple def __hash__(self): return hash(self.triple) class DeductionEngine():", "for p in chain.from_iterable(predictions): # per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions = defaultdict(lambda: defaultdict(Prediction))", "rules to sparql and fire them over the KG. The", "threshold, list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method, reverse=True) include = topk if topk >", "functional predicates :param per_entity_prediction: :return: \"\"\" def quality_method(p): return p.get_quality(self.quality,", "infer(self, descriptions, recursive=False, topk=-1): pass class SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\" Deduction engine", "quality=self.quality) if target_entities and clear_target_entities: self.labels_indexer.drop() return per_entity_predictions def consolidate(self,", "single prediction with several sources for equivalent predictions :rtype: dict", "itertools import chain from excut.explanations_mining.descriptions import dump_explanations_to_file from excut.explanations_mining.descriptions_new import", "def __init__(self, **kwargs): pass def infer(self, descriptions, recursive=False, topk=-1): pass", "isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if target_entities: self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation() predictions = list(map(self._infer_single, descriptions_list))", "relation: the relation used in the predicted triple (optional) :param", "target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org', 'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4,", "kg_query_interface: interface for the KG. 
:param relation: the relation used", "from excut.kg.utils.Constants import DEFUALT_AUX_RELATION from excut.clustering import target_entities as tes", "__repr__(self): return \"%s\\t(\\t%s,%s)\" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources)) def __eq__(self, other):", "p.get_quality(quality) if with_weight else '') + ( '\\t%s' % p.source_description", "per_var_predictions = defaultdict(lambda: defaultdict(list)) # for p in chain.from_iterable(predictions): #", "identifier=kg_query_interface.labels_identifier) def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None, clear_target_entities=True): \"\"\"", "in chain.from_iterable(predictions): # per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions = defaultdict(lambda: defaultdict(Prediction)) for p", "out_filepath, triple_format=True, topk=-1, with_weight=True, with_description=False, quality='x_coverage'): \"\"\" Writes the predictions", "target_entities: entities and their labels for which predictions are generated", "quality self.quality_aggregation = quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint, graph= kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier) def", "if triple_format: # I only output normalized_coverage out_str = n3_repr(p.triple)", "new facts for the given Description :param description: :return: \"\"\"", "Description2, Atom, load_from_file from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended from excut.explanations_mining.simple_miner.description_miner_extended import", "with_weight: :param with_description: :return: \"\"\" out_file_parsable = out_filepath + '.parsable'", "else '') else: out_str = str(p) out_file.write(out_str) out_file.write('\\n') with open(out_file_parsable", "from itertools import chain from excut.explanations_mining.descriptions import dump_explanations_to_file from excut.explanations_mining.descriptions_new", "topk: :param with_weight: :param with_description: :return: \"\"\" out_file_parsable = out_filepath", "in per_var_predictions.items(): if topk > 0: predictions = predictions[:topk] for", "same fact (optional) by default max is used. \"\"\" super(SparqlBasedDeductionEngineExtended,", "self.relation = relation self.query_executer = kg_query_interface self.quality = quality self.quality_aggregation", "= kg_query_interface self.quality = quality self.quality_aggregation = quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint,", "self.quality = quality self.quality_aggregation = quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint, graph= kg_query_interface.labels_graph,", "of predicted entity-clusters assignments \"\"\" if isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if target_entities:", "sources for equivalent predictions :rtype: dict \"\"\" # per_var_predictions =", "only output normalized_coverage out_str = n3_repr(p.triple) + ('\\t%f' % p.get_quality(quality)", "triple \"\"\" # def __init__(self, triple: tuple, source_description=Description(), all_sources=None): def", "sparql and fire them over the KG. 
The rule-based_deduction takes", "else l, per_var_predictions.values()))))) return out_filepath_with_type if __name__ == '__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')", "all rules that predicted the same triple \"\"\" # def", "minimum aggregated quality for the predictions :param topk: k *distinct*", "with several sources for equivalent predictions :rtype: dict \"\"\" #", "defaultdict(lambda: defaultdict(list)) # for p in chain.from_iterable(predictions): # per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions", "from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure from", "+ ( '\\t%s' % p.source_description if with_description else '') else:", "DeductionEngine(): \"\"\" Abstract rulebased_deduction/inference engine. \"\"\" def __init__(self, **kwargs): pass", "ded.infer(explans, target_entities, output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv') logger.info(\"Total variables with predictions subjects: %i\", len(per_var_predictions))", "triple_format: # I only output normalized_coverage out_str = n3_repr(p.triple) +", "# for p in chain.from_iterable(predictions): # per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions = defaultdict(lambda:", "dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True, with_description=False, quality=self.quality) if target_entities and", "excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended from excut.kg.kg_indexing import Indexer from excut.kg.utils.data_formating", ":param topk: k *distinct* highest quality predictions per entity, :param", "SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\" Deduction engine that converts the rules to sparql", "class DeductionEngine(): \"\"\" Abstract rulebased_deduction/inference engine. \"\"\" def __init__(self, **kwargs):", "import itertools from collections import defaultdict from itertools import chain", "Merge the the inferred facts in case of functional predicates", "quality='x_coverage', quality_aggregation=max): \"\"\" :param kg_query_interface: interface for the KG. :param", "(self.__class__.__name__, repr(self.triple), repr(self.all_sources)) def __eq__(self, other): return other.triple == self.triple", "of the rules :ivar triple: the predicted triple :ivar all_sources:", ":param kg_query_interface: interface for the KG. :param relation: the relation", "max is used. \"\"\" super(SparqlBasedDeductionEngineExtended, self).__init__() self.relation = relation self.query_executer", "of descriptions :param descriptions_list: list of explantions/descriptions rules :param target_entities:", "str(self.get_main_description()) def __repr__(self): return \"%s\\t(\\t%s,%s)\" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources)) def", ":param output_filepath: predictions output file. 
:param clear_target_entities: clear indexed target", "of consolidating similar predictions \"\"\" def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION,", "= predictions[:topk] for p in predictions: if triple_format: # I", "(rulebased_deduction engine) \"\"\" import itertools from collections import defaultdict from", "Abstract rulebased_deduction/inference engine. \"\"\" def __init__(self, **kwargs): pass def infer(self,", "quality_aggregation: the methd used for aggregating the score if multiple", "get_subject(self): return self.triple[0] def get_object(self): return self.triple[2] def get_quality(self, measure='x_coverage',", "several sources for equivalent predictions :rtype: dict \"\"\" # per_var_predictions", "KGQueryInterfaceExtended from excut.kg.kg_indexing import Indexer from excut.kg.utils.data_formating import n3_repr from", "topk > 0 else l, per_var_predictions.values()))))) return out_filepath_with_type if __name__", "self.relation, '?z')])) head = description.head # only supports p(?x,CONSTANT) predictions", "% quality if len(quality) > 0 else '') with open(out_filepath_with_type,", "predictions in per_var_predictions.items(): if topk > 0: predictions = predictions[:topk]", "that converts the rules to sparql and fire them over", "EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org', 'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE}) explans=explainer.explain(target_entities,", "p: quality_method(p) > threshold, list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method, reverse=True) include = topk", "__str__(self): return str(self.triple) + '<<' + str(self.get_main_description()) def __repr__(self): return", "list of explantions/descriptions rules :param target_entities: entities and their labels", "p in list(chain.from_iterable(predictions)): cons_pred = per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple = p.triple cons_pred.all_sources", "topk > 0: predictions = predictions[:topk] for p in predictions:", "if output_filepath: dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True, with_description=False, quality=self.quality) if", "self.quality_aggregation) per_entity_prediction_filtered = defaultdict(list) for sub, per_obj_predictions in per_entity_prediction.items(): #", "else ''), 'w') as out_file: out_file.write('\\n'.join( map(str, chain.from_iterable(map(lambda l: l[:topk]", "if len(quality) > 0 else '') with open(out_filepath_with_type, 'w') as", "def get_subject(self): return self.triple[0] def get_object(self): return self.triple[2] def get_quality(self,", "Prediction: \"\"\" An object to represent the prediction of the", "# print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()]) merged_predictions", "l, per_var_predictions.values()))))) return out_filepath_with_type if __name__ == '__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer", "as tes class Prediction: \"\"\" An object to represent the", "in list(chain.from_iterable(predictions)): cons_pred = per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple = p.triple cons_pred.all_sources +=", "(optional) by default the exclusive coverage of the rules is", "topk=-1): 
pass class SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\" Deduction engine that converts the", "== '__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org', 'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer,", "= self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk) if output_filepath: dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk,", "sources=None): self.triple = triple # self.source_description = source_descriptionf self.all_sources =", "defaultdict(lambda: defaultdict(Prediction)) for p in list(chain.from_iterable(predictions)): cons_pred = per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple", "out_file.write('\\n') with open(out_file_parsable + ('.%s' % quality if len(quality) >", "# only supports p(?x,CONSTANT) predictions = [Prediction((b, head.predicate, head.object), [description])", "threshold=0, topk=-1): \"\"\" Merge the the inferred facts in case", "import dump_explanations_to_file from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file from excut.explanations_mining.explaining_engines_extended", "out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) >", "method=max): return method(self.all_sources, key=lambda d: d.get_quality(measure)) def __str__(self): return str(self.triple)", "= SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions = ded.infer(explans, target_entities, output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv') logger.info(\"Total variables with", "tuple, source_description=Description(), all_sources=None): def __init__(self, triple=None, sources=None): self.triple = triple", "> 0: predictions = predictions[:topk] for p in predictions: if", "exclusive coverage of the rules is used :param quality_aggregation: the", "and their labels for which predictions are generated :param min_quality:", "len(merged_predictions) per_entity_prediction_filtered[sub] = merged_predictions[:include] return per_entity_prediction_filtered def _infer_single(self, description: Description2):", "other): return other.triple == self.triple def __hash__(self): return hash(self.triple) class", "def __eq__(self, other): return other.triple == self.triple def __hash__(self): return", "__hash__(self): return hash(self.triple) class DeductionEngine(): \"\"\" Abstract rulebased_deduction/inference engine. 
\"\"\"", "tes class Prediction: \"\"\" An object to represent the prediction", "('.%s' % quality if len(quality) > 0 else '') with", "0 else l, per_var_predictions.values()))))) return out_filepath_with_type if __name__ == '__main__':", "recursive=False, topk=-1): pass class SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\" Deduction engine that converts", "sources else {source_description} def get_subject(self): return self.triple[0] def get_object(self): return", "is used :param quality_aggregation: the methd used for aggregating the", "def get_object(self): return self.triple[2] def get_quality(self, measure='x_coverage', method=max): # return", "k, p in per_obj_predictions.items()]) merged_predictions = list( filter(lambda p: quality_method(p)", "PathBasedClustersExplainerExtended from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended,", "return self.source_description.get_quality(measure) return method([source.get_quality(measure) for source in self.all_sources]) def get_main_description(self,", "d: d.get_quality(measure)) def __str__(self): return str(self.triple) + '<<' + str(self.get_main_description())", "predictions: if triple_format: # I only output normalized_coverage out_str =", "new facts for a giving set of descriptions :param descriptions_list:", "the score if multiple rules infers the same fact (optional)", "= quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint, graph= kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier) def infer(self, descriptions_list,", "'\\t%s' % p.source_description if with_description else '') else: out_str =", "restriction_pattern=Description2(body=[Atom('?x', self.relation, '?z')])) head = description.head # only supports p(?x,CONSTANT)", "def quality_method(p): return p.get_quality(self.quality, self.quality_aggregation) per_entity_prediction_filtered = defaultdict(list) for sub,", "generated :param min_quality: minimum aggregated quality for the predictions :param", ":param clear_target_entities: clear indexed target entities after done inference :return:", "chain.from_iterable(predictions): # per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions = defaultdict(lambda: defaultdict(Prediction)) for p in", "% p.source_description if with_description else '') else: out_str = str(p)", "by default max is used. 
\"\"\" super(SparqlBasedDeductionEngineExtended, self).__init__() self.relation =", "case of functional predicates :param per_entity_prediction: :return: \"\"\" def quality_method(p):", "contains the rule-based inference (rulebased_deduction engine) \"\"\" import itertools from", "def __str__(self): return str(self.triple) + '<<' + str(self.get_main_description()) def __repr__(self):", "p.triple cons_pred.all_sources += p.all_sources return per_entity_predictions def _merge_and_sort_cut(self, per_entity_prediction, threshold=0,", "given Description :param description: :return: \"\"\" bindings = self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x',", "'w') as out_file: for var, predictions in per_var_predictions.items(): if topk", "Description :param description: :return: \"\"\" bindings = self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x', self.relation,", "if sources else list() # sources if sources else {source_description}", ":param topk: :param with_weight: :param with_description: :return: \"\"\" out_file_parsable =", "can be parsed in python. :param per_var_predictions: :param out_filepath: :param", ":return: \"\"\" out_file_parsable = out_filepath + '.parsable' out_filepath_with_type = out_filepath", "% quality if len(quality) > 0 else ''), 'w') as", "kg_query_interface self.quality = quality self.quality_aggregation = quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint, graph=", "with .parsable extension that can be parsed in python. :param", "# sources if sources else {source_description} def get_subject(self): return self.triple[0]", "import PathBasedClustersExplainerExtended from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure from excut.kg.kg_query_interface_extended import", "rulebased_deduction/inference engine. 
\"\"\" def __init__(self, **kwargs): pass def infer(self, descriptions,", "by default the exclusive coverage of the rules is used", "care of consolidating similar predictions \"\"\" def __init__(self, kg_query_interface: KGQueryInterfaceExtended,", "with open(out_file_parsable + ('.%s' % quality if len(quality) > 0", "> 0 else ''), 'w') as out_file: out_file.write('\\n'.join( map(str, chain.from_iterable(map(lambda", "\"\"\" def __init__(self, **kwargs): pass def infer(self, descriptions, recursive=False, topk=-1):", "p(?x,CONSTANT) predictions = [Prediction((b, head.predicate, head.object), [description]) for b in", "str(p) out_file.write(out_str) out_file.write('\\n') with open(out_file_parsable + ('.%s' % quality if", "topk=topk) if output_filepath: dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True, with_description=False, quality=self.quality)", "is human readable and the other with .parsable extension that", "= source_descriptionf self.all_sources = sources if sources else list() #", "as out_file: for var, predictions in per_var_predictions.items(): if topk >", "list(chain.from_iterable(predictions)): cons_pred = per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple = p.triple cons_pred.all_sources += p.all_sources", "target entities after done inference :return: dictionary of predicted entity-clusters", "# self.source_description = source_descriptionf self.all_sources = sources if sources else", "get_quality(self, measure='x_coverage', method=max): # return self.source_description.get_quality(measure) return method([source.get_quality(measure) for source", "measure='x_coverage', method=max): return method(self.all_sources, key=lambda d: d.get_quality(measure)) def __str__(self): return", "else: out_str = str(p) out_file.write(out_str) out_file.write('\\n') with open(out_file_parsable + ('.%s'", "merged_predictions[:include] return per_entity_prediction_filtered def _infer_single(self, description: Description2): \"\"\" Infer new", "rules is used :param quality_aggregation: the methd used for aggregating", "the rules :ivar triple: the predicted triple :ivar all_sources: all", "**kwargs): pass def infer(self, descriptions, recursive=False, topk=-1): pass class SparqlBasedDeductionEngineExtended(DeductionEngine):", "for p in list(chain.from_iterable(predictions)): cons_pred = per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple = p.triple", "head.object), [description]) for b in bindings] return predictions def dump_predictions_map(per_var_predictions,", "4, 'structure': ExplanationStructure.TREE}) explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded = SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions =", "and fire them over the KG. 
The rule-based_deduction takes care", "SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions = ded.infer(explans, target_entities, output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv') logger.info(\"Total variables with predictions", "self.source_description = source_descriptionf self.all_sources = sources if sources else list()", "for sub, per_obj_predictions in per_entity_prediction.items(): # print([(k, p.triple[2], qaulity_method(p)) for", "= p.triple cons_pred.all_sources += p.all_sources return per_entity_predictions def _merge_and_sort_cut(self, per_entity_prediction,", "filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method, reverse=True) include =", "if topk > 0 else l, per_var_predictions.values()))))) return out_filepath_with_type if", "p.source_description if with_description else '') else: out_str = str(p) out_file.write(out_str)", "class SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\" Deduction engine that converts the rules to", "__init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max): \"\"\" :param kg_query_interface: interface", "description: Description2): \"\"\" Infer new facts for the given Description", "if sources else {source_description} def get_subject(self): return self.triple[0] def get_object(self):", "= list(map(self._infer_single, descriptions_list)) per_entity_predictions = self.consolidate(predictions) per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality,", "used :param quality_aggregation: the methd used for aggregating the score", "the predictions to two files, the first is human readable", ":ivar all_sources: all rules that predicted the same triple \"\"\"", "human readable and the other with .parsable extension that can", "for p in predictions: if triple_format: # I only output", "object to represent the prediction of the rules :ivar triple:", "other with .parsable extension that can be parsed in python.", "self.triple[2] def get_quality(self, measure='x_coverage', method=max): # return self.source_description.get_quality(measure) return method([source.get_quality(measure)", "dictionary of predicted entity-clusters assignments \"\"\" if isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if", "used. \"\"\" super(SparqlBasedDeductionEngineExtended, self).__init__() self.relation = relation self.query_executer = kg_query_interface", "is used. 
\"\"\" super(SparqlBasedDeductionEngineExtended, self).__init__() self.relation = relation self.query_executer =", "import DEFUALT_AUX_RELATION from excut.clustering import target_entities as tes class Prediction:", "highest quality predictions per entity, :param output_filepath: predictions output file.", "cons_pred.triple = p.triple cons_pred.all_sources += p.all_sources return per_entity_predictions def _merge_and_sort_cut(self,", "Infer new facts for the given Description :param description: :return:", "_merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1): \"\"\" Merge the the inferred facts", "defaultdict(list) for sub, per_obj_predictions in per_entity_prediction.items(): # print([(k, p.triple[2], qaulity_method(p))", "= self.consolidate(predictions) per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk) if output_filepath: dump_predictions_map(per_entity_predictions,", "itertools from collections import defaultdict from itertools import chain from", "cons_pred.all_sources += p.all_sources return per_entity_predictions def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):", "> 0 else len(merged_predictions) per_entity_prediction_filtered[sub] = merged_predictions[:include] return per_entity_prediction_filtered def", "methd used for aggregating the score if multiple rules infers", "% p.get_quality(quality) if with_weight else '') + ( '\\t%s' %", "predictions = list(map(self._infer_single, descriptions_list)) per_entity_predictions = self.consolidate(predictions) per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions,", "[description]) for b in bindings] return predictions def dump_predictions_map(per_var_predictions, out_filepath,", "out_file: out_file.write('\\n'.join( map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0", "self.source_description.get_quality(measure) return method([source.get_quality(measure) for source in self.all_sources]) def get_main_description(self, measure='x_coverage',", "facts in case of functional predicates :param per_entity_prediction: :return: \"\"\"", "self.consolidate(predictions) per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk) if output_filepath: dump_predictions_map(per_entity_predictions, output_filepath,", "per_entity_prediction_filtered = defaultdict(list) for sub, per_obj_predictions in per_entity_prediction.items(): # print([(k,", "relation used in the predicted triple (optional) :param quality: objective", "predicted triple (optional) :param quality: objective quality measure for ranking", "return str(self.triple) + '<<' + str(self.get_main_description()) def __repr__(self): return \"%s\\t(\\t%s,%s)\"", "descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if target_entities: self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation() predictions = list(map(self._infer_single, descriptions_list)) per_entity_predictions", "else list() # sources if sources else {source_description} def get_subject(self):", "entity-clusters assignments \"\"\" if isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if target_entities: self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation()", "predictions :param topk: k *distinct* highest quality predictions per entity,", "= topk if topk > 0 else len(merged_predictions) 
per_entity_prediction_filtered[sub] =", "self.triple = triple # self.source_description = source_descriptionf self.all_sources = sources", "giving set of descriptions :param descriptions_list: list of explantions/descriptions rules", "in predictions: if triple_format: # I only output normalized_coverage out_str", ":param relation: the relation used in the predicted triple (optional)", "the rules is used :param quality_aggregation: the methd used for", "= description.head # only supports p(?x,CONSTANT) predictions = [Prediction((b, head.predicate,", "explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded = SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions = ded.infer(explans, target_entities, output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')", "quality for the predictions :param topk: k *distinct* highest quality", "defaultdict(list)) # for p in chain.from_iterable(predictions): # per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions =", "triple_format=True, topk=topk, with_weight=True, with_description=False, quality=self.quality) if target_entities and clear_target_entities: self.labels_indexer.drop()", "\"\"\" Infer new facts for the given Description :param description:", "return method(self.all_sources, key=lambda d: d.get_quality(measure)) def __str__(self): return str(self.triple) +", "rules infers the same fact (optional) by default max is", "files, the first is human readable and the other with", "+ ('.%s' % quality if len(quality) > 0 else '')", "+ ('\\t%f' % p.get_quality(quality) if with_weight else '') + (", "['http://yago-expr.org', 'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE}) explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')", ":param description: :return: \"\"\" bindings = self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x', self.relation, '?z')]))", "else '') + ( '\\t%s' % p.source_description if with_description else", "all_sources=None): def __init__(self, triple=None, sources=None): self.triple = triple # self.source_description", "if isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if target_entities: self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation() predictions = list(map(self._infer_single,", "self.all_sources = sources if sources else list() # sources if", "two files, the first is human readable and the other", "'structure': ExplanationStructure.TREE}) explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded = SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions = ded.infer(explans,", "converts the rules to sparql and fire them over the", "'w') as out_file: out_file.write('\\n'.join( map(str, chain.from_iterable(map(lambda l: l[:topk] if topk", "logger from excut.kg.utils.Constants import DEFUALT_AUX_RELATION from excut.clustering import target_entities as", "with_description=False, quality='x_coverage'): \"\"\" Writes the predictions to two files, the", "output_filepath, triple_format=True, topk=topk, with_weight=True, with_description=False, 
quality=self.quality) if target_entities and clear_target_entities:", "out_file: for var, predictions in per_var_predictions.items(): if topk > 0:", "include = topk if topk > 0 else len(merged_predictions) per_entity_prediction_filtered[sub]", "fact (optional) by default max is used. \"\"\" super(SparqlBasedDeductionEngineExtended, self).__init__()", "def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True, with_description=False, quality='x_coverage'): \"\"\" Writes", "similar predictions \"\"\" def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max):", "list(map(self._infer_single, descriptions_list)) per_entity_predictions = self.consolidate(predictions) per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk)", "return method([source.get_quality(measure) for source in self.all_sources]) def get_main_description(self, measure='x_coverage', method=max):", "= defaultdict(lambda: defaultdict(list)) # for p in chain.from_iterable(predictions): # per_var_predictions[p.get_subject()][p.get_object()].append(p)", "per entity, :param output_filepath: predictions output file. :param clear_target_entities: clear", "per_entity_predictions = self.consolidate(predictions) per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk) if output_filepath:", "= str(p) out_file.write(out_str) out_file.write('\\n') with open(out_file_parsable + ('.%s' % quality", "p.all_sources return per_entity_predictions def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1): \"\"\" Merge", "if with_description else '') else: out_str = str(p) out_file.write(out_str) out_file.write('\\n')", "prediction with several sources for equivalent predictions :rtype: dict \"\"\"", "done inference :return: dictionary of predicted entity-clusters assignments \"\"\" if", "infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None, clear_target_entities=True): \"\"\" Infer new", "for k, p in per_obj_predictions.items()]) merged_predictions = list( filter(lambda p:", "else '') with open(out_filepath_with_type, 'w') as out_file: for var, predictions", "(optional) by default max is used. \"\"\" super(SparqlBasedDeductionEngineExtended, self).__init__() self.relation", "n3_repr(p.triple) + ('\\t%f' % p.get_quality(quality) if with_weight else '') +", "quality: objective quality measure for ranking the predictions (optional) by", "0 else ''), 'w') as out_file: out_file.write('\\n'.join( map(str, chain.from_iterable(map(lambda l:", "def __init__(self, triple: tuple, source_description=Description(), all_sources=None): def __init__(self, triple=None, sources=None):", "def consolidate(self, predictions): \"\"\" Combine predictions from different rules :param", "predictions per entity, :param output_filepath: predictions output file. 
:param clear_target_entities:", "descriptions_list)) per_entity_predictions = self.consolidate(predictions) per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk) if", "\"\"\" # def __init__(self, triple: tuple, source_description=Description(), all_sources=None): def __init__(self,", "the predicted triple :ivar all_sources: all rules that predicted the", "return \"%s\\t(\\t%s,%s)\" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources)) def __eq__(self, other): return", "+ '.parsable' out_filepath_with_type = out_filepath + ('.%s' % quality if", "with open(out_filepath_with_type, 'w') as out_file: for var, predictions in per_var_predictions.items():", "sources if sources else {source_description} def get_subject(self): return self.triple[0] def", "excut.kg.utils.Constants import DEFUALT_AUX_RELATION from excut.clustering import target_entities as tes class", "\"\"\" import itertools from collections import defaultdict from itertools import", "measure for ranking the predictions (optional) by default the exclusive", "triple: the predicted triple :ivar all_sources: all rules that predicted", "head = description.head # only supports p(?x,CONSTANT) predictions = [Prediction((b,", "'<<' + str(self.get_main_description()) def __repr__(self): return \"%s\\t(\\t%s,%s)\" % (self.__class__.__name__, repr(self.triple),", "\"\"\" # per_var_predictions = defaultdict(lambda: defaultdict(list)) # for p in", "open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else", "+ ('.%s' % quality if len(quality) > 0 else ''),", "target_entities and clear_target_entities: self.labels_indexer.drop() return per_entity_predictions def consolidate(self, predictions): \"\"\"", "sources if sources else list() # sources if sources else", "get_object(self): return self.triple[2] def get_quality(self, measure='x_coverage', method=max): # return self.source_description.get_quality(measure)", "for source in self.all_sources]) def get_main_description(self, measure='x_coverage', method=max): return method(self.all_sources,", "return per_entity_prediction_filtered def _infer_single(self, description: Description2): \"\"\" Infer new facts", "of functional predicates :param per_entity_prediction: :return: \"\"\" def quality_method(p): return", "__eq__(self, other): return other.triple == self.triple def __hash__(self): return hash(self.triple)", "source in self.all_sources]) def get_main_description(self, measure='x_coverage', method=max): return method(self.all_sources, key=lambda", "in per_entity_prediction.items(): # print([(k, p.triple[2], qaulity_method(p)) for k, p in", "default max is used. 
\"\"\" super(SparqlBasedDeductionEngineExtended, self).__init__() self.relation = relation", "self.labels_indexer.drop() return per_entity_predictions def consolidate(self, predictions): \"\"\" Combine predictions from", ":param with_weight: :param with_description: :return: \"\"\" out_file_parsable = out_filepath +", "super(SparqlBasedDeductionEngineExtended, self).__init__() self.relation = relation self.query_executer = kg_query_interface self.quality =", "if topk > 0 else len(merged_predictions) per_entity_prediction_filtered[sub] = merged_predictions[:include] return", "self.quality_aggregation = quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint, graph= kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier) def infer(self,", "engine that converts the rules to sparql and fire them", "in self.all_sources]) def get_main_description(self, measure='x_coverage', method=max): return method(self.all_sources, key=lambda d:", "method([source.get_quality(measure) for source in self.all_sources]) def get_main_description(self, measure='x_coverage', method=max): return", "p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()]) merged_predictions = list(", "= sources if sources else list() # sources if sources", "The rule-based_deduction takes care of consolidating similar predictions \"\"\" def", ":param predictions: list of generated predictions :return: combined single prediction", "\"\"\" Writes the predictions to two files, the first is", "source_descriptionf self.all_sources = sources if sources else list() # sources", "after done inference :return: dictionary of predicted entity-clusters assignments \"\"\"", "= defaultdict(list) for sub, per_obj_predictions in per_entity_prediction.items(): # print([(k, p.triple[2],", "excut.kg.kg_indexing import Indexer from excut.kg.utils.data_formating import n3_repr from excut.utils.logging import", "\"\"\" if isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values())) if target_entities: self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation() predictions =", "+ '<<' + str(self.get_main_description()) def __repr__(self): return \"%s\\t(\\t%s,%s)\" % (self.__class__.__name__,", "KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max): \"\"\" :param kg_query_interface: interface for the", "\"\"\" Merge the the inferred facts in case of functional", "clear_target_entities: self.labels_indexer.drop() return per_entity_predictions def consolidate(self, predictions): \"\"\" Combine predictions", ":param quality: objective quality measure for ranking the predictions (optional)", "per_entity_predictions def consolidate(self, predictions): \"\"\" Combine predictions from different rules", "predicates :param per_entity_prediction: :return: \"\"\" def quality_method(p): return p.get_quality(self.quality, self.quality_aggregation)", "the methd used for aggregating the score if multiple rules", "quality if len(quality) > 0 else ''), 'w') as out_file:", "the KG. 
:param relation: the relation used in the predicted", "def infer(self, descriptions, recursive=False, topk=-1): pass class SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\" Deduction", "of generated predictions :return: combined single prediction with several sources", "other.triple == self.triple def __hash__(self): return hash(self.triple) class DeductionEngine(): \"\"\"", "language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE}) explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded = SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions", "descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None, clear_target_entities=True): \"\"\" Infer new facts", "excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended from", "the rules to sparql and fire them over the KG.", "method(self.all_sources, key=lambda d: d.get_quality(measure)) def __str__(self): return str(self.triple) + '<<'", "\"\"\" :param kg_query_interface: interface for the KG. :param relation: the", "k *distinct* highest quality predictions per entity, :param output_filepath: predictions", "= out_filepath + ('.%s' % quality if len(quality) > 0", "per_var_predictions: :param out_filepath: :param triple_format: :param topk: :param with_weight: :param", "description: :return: \"\"\" bindings = self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x', self.relation, '?z')])) head", "= self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x', self.relation, '?z')])) head = description.head # only", "the predicted triple (optional) :param quality: objective quality measure for", "else len(merged_predictions) per_entity_prediction_filtered[sub] = merged_predictions[:include] return per_entity_prediction_filtered def _infer_single(self, description:", "represent the prediction of the rules :ivar triple: the predicted", "module contains the rule-based inference (rulebased_deduction engine) \"\"\" import itertools", ":param descriptions_list: list of explantions/descriptions rules :param target_entities: entities and", "# per_var_predictions = defaultdict(lambda: defaultdict(list)) # for p in chain.from_iterable(predictions):", "('.%s' % quality if len(quality) > 0 else ''), 'w')", "ded = SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions = ded.infer(explans, target_entities, output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv') logger.info(\"Total variables", "merged_predictions.sort(key=quality_method, reverse=True) include = topk if topk > 0 else", "excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure from excut.kg.kg_query_interface_extended", "quality_aggregation=max): \"\"\" :param kg_query_interface: interface for the KG. :param relation:", "that can be parsed in python. 
:param per_var_predictions: :param out_filepath:", "with_description: :return: \"\"\" out_file_parsable = out_filepath + '.parsable' out_filepath_with_type =", "import DescriptionMinerExtended, ExplanationStructure from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended from excut.kg.kg_indexing", "self).__init__() self.relation = relation self.query_executer = kg_query_interface self.quality = quality", "return self.triple[2] def get_quality(self, measure='x_coverage', method=max): # return self.source_description.get_quality(measure) return", "coverage of the rules is used :param quality_aggregation: the methd", "per_obj_predictions in per_entity_prediction.items(): # print([(k, p.triple[2], qaulity_method(p)) for k, p", "merged_predictions = list( filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method,", "pass def infer(self, descriptions, recursive=False, topk=-1): pass class SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\"", "aggregated quality for the predictions :param topk: k *distinct* highest", "('\\t%f' % p.get_quality(quality) if with_weight else '') + ( '\\t%s'", "if topk > 0: predictions = predictions[:topk] for p in", "for a giving set of descriptions :param descriptions_list: list of", "equivalent predictions :rtype: dict \"\"\" # per_var_predictions = defaultdict(lambda: defaultdict(list))", "= defaultdict(lambda: defaultdict(Prediction)) for p in list(chain.from_iterable(predictions)): cons_pred = per_entity_predictions[p.get_subject()][p.get_object()]", "triple :ivar all_sources: all rules that predicted the same triple", "== self.triple def __hash__(self): return hash(self.triple) class DeductionEngine(): \"\"\" Abstract", "for the KG. :param relation: the relation used in the", "predictions to two files, the first is human readable and", "bindings = self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x', self.relation, '?z')])) head = description.head #", "= relation self.query_executer = kg_query_interface self.quality = quality self.quality_aggregation =", ".parsable extension that can be parsed in python. :param per_var_predictions:", "sources else list() # sources if sources else {source_description} def", "set of descriptions :param descriptions_list: list of explantions/descriptions rules :param", "be parsed in python. 
:param per_var_predictions: :param out_filepath: :param triple_format:", "the predictions (optional) by default the exclusive coverage of the", "Indexer from excut.kg.utils.data_formating import n3_repr from excut.utils.logging import logger from", "from excut.utils.logging import logger from excut.kg.utils.Constants import DEFUALT_AUX_RELATION from excut.clustering", "topk=-1, with_weight=True, with_description=False, quality='x_coverage'): \"\"\" Writes the predictions to two", "with_description=False, quality=self.quality) if target_entities and clear_target_entities: self.labels_indexer.drop() return per_entity_predictions def", "relation self.query_executer = kg_query_interface self.quality = quality self.quality_aggregation = quality_aggregation", "first is human readable and the other with .parsable extension", "ExplanationStructure.TREE}) explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded = SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions = ded.infer(explans, target_entities,", "rules :ivar triple: the predicted triple :ivar all_sources: all rules", "ranking the predictions (optional) by default the exclusive coverage of", "excut.utils.logging import logger from excut.kg.utils.Constants import DEFUALT_AUX_RELATION from excut.clustering import", "with_weight=True, with_description=False, quality='x_coverage'): \"\"\" Writes the predictions to two files,", "DEFUALT_AUX_RELATION from excut.clustering import target_entities as tes class Prediction: \"\"\"", "quality predictions per entity, :param output_filepath: predictions output file. :param", "*distinct* highest quality predictions per entity, :param output_filepath: predictions output", "in case of functional predicates :param per_entity_prediction: :return: \"\"\" def", "for the given Description :param description: :return: \"\"\" bindings =", "map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l,", "def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1): \"\"\" Merge the the inferred", "def get_quality(self, measure='x_coverage', method=max): # return self.source_description.get_quality(measure) return method([source.get_quality(measure) for", "for the predictions :param topk: k *distinct* highest quality predictions", "per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk) if output_filepath: dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True,", "from collections import defaultdict from itertools import chain from excut.explanations_mining.descriptions", "# return self.source_description.get_quality(measure) return method([source.get_quality(measure) for source in self.all_sources]) def", "extension that can be parsed in python. 
:param per_var_predictions: :param", "in the predicted triple (optional) :param quality: objective quality measure", "p in per_obj_predictions.items()]) merged_predictions = list( filter(lambda p: quality_method(p) >", ":return: dictionary of predicted entity-clusters assignments \"\"\" if isinstance(descriptions_list,dict): descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values()))", "Writes the predictions to two files, the first is human", "\"\"\" bindings = self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x', self.relation, '?z')])) head = description.head", "quality_method(p) > threshold, list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method, reverse=True) include = topk if", "if target_entities: self.labels_indexer.index_triples(target_entities) self.relation=target_entities.get_relation() predictions = list(map(self._infer_single, descriptions_list)) per_entity_predictions =", "which predictions are generated :param min_quality: minimum aggregated quality for", "quality measure for ranking the predictions (optional) by default the", "them over the KG. The rule-based_deduction takes care of consolidating", "predicted the same triple \"\"\" # def __init__(self, triple: tuple,", "n3_repr from excut.utils.logging import logger from excut.kg.utils.Constants import DEFUALT_AUX_RELATION from", "all_sources: all rules that predicted the same triple \"\"\" #", "from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended from excut.kg.kg_indexing import Indexer from", "__init__(self, **kwargs): pass def infer(self, descriptions, recursive=False, topk=-1): pass class", ":rtype: dict \"\"\" # per_var_predictions = defaultdict(lambda: defaultdict(list)) # for", "= per_entity_predictions[p.get_subject()][p.get_object()] cons_pred.triple = p.triple cons_pred.all_sources += p.all_sources return per_entity_predictions", "per_entity_prediction, threshold=0, topk=-1): \"\"\" Merge the the inferred facts in", "hash(self.triple) class DeductionEngine(): \"\"\" Abstract rulebased_deduction/inference engine. 
\"\"\" def __init__(self,", "normalized_coverage out_str = n3_repr(p.triple) + ('\\t%f' % p.get_quality(quality) if with_weight", "triple_format: :param topk: :param with_weight: :param with_description: :return: \"\"\" out_file_parsable", "predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]", "else {source_description} def get_subject(self): return self.triple[0] def get_object(self): return self.triple[2]", "consolidate(self, predictions): \"\"\" Combine predictions from different rules :param predictions:", "description.head # only supports p(?x,CONSTANT) predictions = [Prediction((b, head.predicate, head.object),", "import Indexer from excut.kg.utils.data_formating import n3_repr from excut.utils.logging import logger", "list of generated predictions :return: combined single prediction with several", "used for aggregating the score if multiple rules infers the", "from excut.kg.utils.data_formating import n3_repr from excut.utils.logging import logger from excut.kg.utils.Constants", "I only output normalized_coverage out_str = n3_repr(p.triple) + ('\\t%f' %", "= quality self.quality_aggregation = quality_aggregation self.labels_indexer=Indexer(store=kg_query_interface.type, endpoint=kg_query_interface.endpoint, graph= kg_query_interface.labels_graph, identifier=kg_query_interface.labels_identifier)", "output file. :param clear_target_entities: clear indexed target entities after done", "with_description else '') else: out_str = str(p) out_file.write(out_str) out_file.write('\\n') with", "generated predictions :return: combined single prediction with several sources for", "only supports p(?x,CONSTANT) predictions = [Prediction((b, head.predicate, head.object), [description]) for", "dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True, with_description=False, quality='x_coverage'): \"\"\" Writes the", "kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max): \"\"\" :param kg_query_interface: interface for", "import defaultdict from itertools import chain from excut.explanations_mining.descriptions import dump_explanations_to_file", "list() # sources if sources else {source_description} def get_subject(self): return", "output_filepath=None, clear_target_entities=True): \"\"\" Infer new facts for a giving set", "entities after done inference :return: dictionary of predicted entity-clusters assignments", "self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk) if output_filepath: dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True,", "This module contains the rule-based inference (rulebased_deduction engine) \"\"\" import", "predictions :return: combined single prediction with several sources for equivalent", "\"\"\" An object to represent the prediction of the rules", "open(out_filepath_with_type, 'w') as out_file: for var, predictions in per_var_predictions.items(): if", "facts for a giving set of descriptions :param descriptions_list: list", "predictions are generated :param min_quality: minimum aggregated quality for the", "KG. 
:param relation: the relation used in the predicted triple", "per_obj_predictions.items()]) merged_predictions = list( filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))", "readable and the other with .parsable extension that can be", "quality='x_coverage'): \"\"\" Writes the predictions to two files, the first", "\"\"\" Deduction engine that converts the rules to sparql and", "= [Prediction((b, head.predicate, head.object), [description]) for b in bindings] return", "'__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org', 'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length':", "rule-based inference (rulebased_deduction engine) \"\"\" import itertools from collections import", "import logger from excut.kg.utils.Constants import DEFUALT_AUX_RELATION from excut.clustering import target_entities", "0 else len(merged_predictions) per_entity_prediction_filtered[sub] = merged_predictions[:include] return per_entity_prediction_filtered def _infer_single(self,", "labels for which predictions are generated :param min_quality: minimum aggregated", ":param with_description: :return: \"\"\" out_file_parsable = out_filepath + '.parsable' out_filepath_with_type", "= list( filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values()))) merged_predictions.sort(key=quality_method, reverse=True)", "# def __init__(self, triple: tuple, source_description=Description(), all_sources=None): def __init__(self, triple=None,", "min_quality, topk=topk) if output_filepath: dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True, with_description=False,", "per_entity_prediction: :return: \"\"\" def quality_method(p): return p.get_quality(self.quality, self.quality_aggregation) per_entity_prediction_filtered =", "triple (optional) :param quality: objective quality measure for ranking the", "the inferred facts in case of functional predicates :param per_entity_prediction:", "output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded = SparqlBasedDeductionEngineExtended(vos_executer) per_var_predictions = ded.infer(explans, target_entities, output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv') logger.info(\"Total", "per_var_predictions.values()))))) return out_filepath_with_type if __name__ == '__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer =", "the the inferred facts in case of functional predicates :param", "'?z')])) head = description.head # only supports p(?x,CONSTANT) predictions =", "predictions from different rules :param predictions: list of generated predictions", "str(self.triple) + '<<' + str(self.get_main_description()) def __repr__(self): return \"%s\\t(\\t%s,%s)\" %", "if __name__ == '__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org', 'http://yago-expr.org.types'],", "from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended", 
"triple: tuple, source_description=Description(), all_sources=None): def __init__(self, triple=None, sources=None): self.triple =", "per_entity_predictions = defaultdict(lambda: defaultdict(Prediction)) for p in list(chain.from_iterable(predictions)): cons_pred =", "excut.kg.utils.data_formating import n3_repr from excut.utils.logging import logger from excut.kg.utils.Constants import", "predictions :rtype: dict \"\"\" # per_var_predictions = defaultdict(lambda: defaultdict(list)) #", "if with_weight else '') + ( '\\t%s' % p.source_description if", "chain from excut.explanations_mining.descriptions import dump_explanations_to_file from excut.explanations_mining.descriptions_new import Description2, Atom,", "per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions = defaultdict(lambda: defaultdict(Prediction)) for p in list(chain.from_iterable(predictions)): cons_pred", "\"\"\" Abstract rulebased_deduction/inference engine. \"\"\" def __init__(self, **kwargs): pass def", "\"\"\" Combine predictions from different rules :param predictions: list of", "quality if len(quality) > 0 else '') with open(out_filepath_with_type, 'w')", "self.triple[0] def get_object(self): return self.triple[2] def get_quality(self, measure='x_coverage', method=max): #", "excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended from", "the KG. The rule-based_deduction takes care of consolidating similar predictions", "out_str = n3_repr(p.triple) + ('\\t%f' % p.get_quality(quality) if with_weight else", "in per_obj_predictions.items()]) merged_predictions = list( filter(lambda p: quality_method(p) > threshold,", "to sparql and fire them over the KG. The rule-based_deduction", "dump_explanations_to_file from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file from excut.explanations_mining.explaining_engines_extended import", "out_file.write('\\n'.join( map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else", "if multiple rules infers the same fact (optional) by default", "p in predictions: if triple_format: # I only output normalized_coverage", ":return: \"\"\" bindings = self.query_executer.get_arguments_bindings(description, restriction_pattern=Description2(body=[Atom('?x', self.relation, '?z')])) head =", "defaultdict from itertools import chain from excut.explanations_mining.descriptions import dump_explanations_to_file from", "from excut.explanations_mining.descriptions import dump_explanations_to_file from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file", "to represent the prediction of the rules :ivar triple: the", "> 0 else l, per_var_predictions.values()))))) return out_filepath_with_type if __name__ ==", "topk=-1): \"\"\" Merge the the inferred facts in case of", "predictions \"\"\" def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max): \"\"\"", "python. 
:param per_var_predictions: :param out_filepath: :param triple_format: :param topk: :param", "predicted triple :ivar all_sources: all rules that predicted the same", "'') with open(out_filepath_with_type, 'w') as out_file: for var, predictions in", "d.get_quality(measure)) def __str__(self): return str(self.triple) + '<<' + str(self.get_main_description()) def", "= triple # self.source_description = source_descriptionf self.all_sources = sources if", "vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org', 'http://yago-expr.org.types'], labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure':", "their labels for which predictions are generated :param min_quality: minimum", "labels_identifier='http://yago-expr.org.labels') explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE}) explans=explainer.explain(target_entities, output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt') ded =", "descriptions, recursive=False, topk=-1): pass class SparqlBasedDeductionEngineExtended(DeductionEngine): \"\"\" Deduction engine that", "DescriptionMinerExtended, ExplanationStructure from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended from excut.kg.kg_indexing import", "multiple rules infers the same fact (optional) by default max", "rules :param predictions: list of generated predictions :return: combined single", "per_var_predictions = ded.infer(explans, target_entities, output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv') logger.info(\"Total variables with predictions subjects:", "Description2): \"\"\" Infer new facts for the given Description :param", "over the KG. The rule-based_deduction takes care of consolidating similar", "supports p(?x,CONSTANT) predictions = [Prediction((b, head.predicate, head.object), [description]) for b", "p in chain.from_iterable(predictions): # per_var_predictions[p.get_subject()][p.get_object()].append(p) per_entity_predictions = defaultdict(lambda: defaultdict(Prediction)) for", "= n3_repr(p.triple) + ('\\t%f' % p.get_quality(quality) if with_weight else '')", "out_filepath_with_type if __name__ == '__main__': target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv') vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql', ['http://yago-expr.org',", "def get_main_description(self, measure='x_coverage', method=max): return method(self.all_sources, key=lambda d: d.get_quality(measure)) def", "target_entities=None, min_quality=0, topk=-1, output_filepath=None, clear_target_entities=True): \"\"\" Infer new facts for" ]
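The shingles in the list above come from excut's rule-based deduction module: each mined description is fired as a SPARQL query, the returned bindings become Prediction objects, equivalent predictions for the same entity are consolidated so that one Prediction carries every supporting rule, and the result is then filtered by an aggregated quality score and cut to the top-k objects per entity. Below is a minimal, self-contained sketch of that consolidate / merge-and-sort-cut step; the simplified Prediction class, the plain float qualities, and the example triples are assumptions made for illustration and are not the actual excut API.

# Minimal sketch (assumption: rule qualities are plain floats; triples are (s, p, o) tuples).
from collections import defaultdict

class Prediction:
    """One predicted triple plus the quality of every rule that produced it."""
    def __init__(self, triple=None, sources=None):
        self.triple = triple
        self.all_sources = sources if sources else []

    def get_quality(self, aggregation=max):
        # Aggregate over all supporting rules; the shingles suggest max is the default.
        return aggregation(self.all_sources) if self.all_sources else 0.0

    def __repr__(self):
        return "%s (quality=%.2f)" % (self.triple, self.get_quality())

def consolidate(predictions):
    """Group predictions by (subject, object) and pool their supporting sources."""
    per_entity = defaultdict(dict)
    for p in predictions:
        subj, _, obj = p.triple
        merged = per_entity[subj].setdefault(obj, Prediction(p.triple, []))
        merged.all_sources += p.all_sources
    return per_entity

def merge_and_sort_cut(per_entity, threshold=0.0, topk=-1):
    """Per subject, keep the top-k distinct objects whose aggregated quality beats the threshold."""
    result = {}
    for subj, per_obj in per_entity.items():
        merged = [p for p in per_obj.values() if p.get_quality() > threshold]
        merged.sort(key=lambda p: p.get_quality(), reverse=True)
        cut = topk if topk > 0 else len(merged)
        result[subj] = merged[:cut]
    return result

# Usage: two rules predict the same cluster for entity e1, a third predicts a weaker alternative.
preds = [
    Prediction(("e1", "belongsTo", "clusterA"), [0.8]),
    Prediction(("e1", "belongsTo", "clusterA"), [0.6]),
    Prediction(("e1", "belongsTo", "clusterB"), [0.3]),
]
print(merge_and_sort_cut(consolidate(preds), threshold=0.2, topk=1))
# -> {'e1': [('e1', 'belongsTo', 'clusterA') (quality=0.80)]}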
[ "is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def dataloader(filepath): left_fold", "PIL import Image import os import os.path import numpy as", "np import pdb import glob IMG_EXTENSIONS = [ '.jpg', '.JPG',", "'.PPM', '.bmp', '.BMP', ] def is_image_file(filename): return any(filename.endswith(extension) for extension", "= img.replace('.jpg', '.png').replace('image_2','flow_occ') if (img1 in train and len(glob.glob(flowp))>0 and", "img.replace('.jpg', '.png').replace('image_2','flow_occ') if (img1 in train and len(glob.glob(flowp))>0 and ('01000'", "] def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def", "return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def dataloader(filepath): left_fold =", "IMG_EXTENSIONS) def dataloader(filepath): left_fold = 'image_2/' train = glob.glob(filepath+left_fold+'/0*.jpg') train", "glob IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG',", "in IMG_EXTENSIONS) def dataloader(filepath): left_fold = 'image_2/' train = glob.glob(filepath+left_fold+'/0*.jpg')", "numpy as np import pdb import glob IMG_EXTENSIONS = [", "as data from PIL import Image import os import os.path", "left_fold = 'image_2/' train = glob.glob(filepath+left_fold+'/0*.jpg') train = sorted(train) l0_train", "[] for img in train: img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp", "('01000' not in img)): l0_train.append(img) l1_train.append(img1) flow_train.append(flowp) return l0_train, l1_train,", "len(glob.glob(flowp))>0 and ('01000' not in img)): l0_train.append(img) l1_train.append(img1) flow_train.append(flowp) return", "glob.glob(filepath+left_fold+'/0*.jpg') train = sorted(train) l0_train = [] l1_train = []", "'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', ] def is_image_file(filename): return", "for img in train: img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp =", "import torch.utils.data as data from PIL import Image import os", "Image import os import os.path import numpy as np import", "img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ') if (img1", "l0_train = [] l1_train = [] flow_train = [] for", "if (img1 in train and len(glob.glob(flowp))>0 and ('01000' not in", "'.PNG', '.ppm', '.PPM', '.bmp', '.BMP', ] def is_image_file(filename): return any(filename.endswith(extension)", "'.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', ] def is_image_file(filename):", "import os.path import numpy as np import pdb import glob", "'.BMP', ] def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)", "[] l1_train = [] flow_train = [] for img in", "= 'image_2/' train = glob.glob(filepath+left_fold+'/0*.jpg') train = sorted(train) l0_train =", "and ('01000' not in img)): l0_train.append(img) l1_train.append(img1) flow_train.append(flowp) return l0_train,", "img in train: img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp = img.replace('.jpg',", "= glob.glob(filepath+left_fold+'/0*.jpg') train = sorted(train) l0_train = [] l1_train =", "import glob IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png',", "[ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp',", "= [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', 
'.ppm', '.PPM',", "= [] flow_train = [] for img in train: img1", "torch.utils.data as data from PIL import Image import os import", "'.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', ] def", "train = glob.glob(filepath+left_fold+'/0*.jpg') train = sorted(train) l0_train = [] l1_train", "for extension in IMG_EXTENSIONS) def dataloader(filepath): left_fold = 'image_2/' train", "[] flow_train = [] for img in train: img1 =", "flow_train = [] for img in train: img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1]))", "import pdb import glob IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg',", "and len(glob.glob(flowp))>0 and ('01000' not in img)): l0_train.append(img) l1_train.append(img1) flow_train.append(flowp)", "<reponame>urasakikeisuke/rigidmask<gh_stars>100-1000 import torch.utils.data as data from PIL import Image import", "data from PIL import Image import os import os.path import", "l1_train = [] flow_train = [] for img in train:", "('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ') if (img1 in train", "IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm',", "train = sorted(train) l0_train = [] l1_train = [] flow_train", "train: img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ') if", "= [] for img in train: img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) ))", "(img1 in train and len(glob.glob(flowp))>0 and ('01000' not in img)):", "os import os.path import numpy as np import pdb import", "= sorted(train) l0_train = [] l1_train = [] flow_train =", "not in img)): l0_train.append(img) l1_train.append(img1) flow_train.append(flowp) return l0_train, l1_train, flow_train", "'.ppm', '.PPM', '.bmp', '.BMP', ] def is_image_file(filename): return any(filename.endswith(extension) for", "as np import pdb import glob IMG_EXTENSIONS = [ '.jpg',", "def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def dataloader(filepath):", "'.bmp', '.BMP', ] def is_image_file(filename): return any(filename.endswith(extension) for extension in", "'.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',", "import numpy as np import pdb import glob IMG_EXTENSIONS =", "from PIL import Image import os import os.path import numpy", "os.path import numpy as np import pdb import glob IMG_EXTENSIONS", "dataloader(filepath): left_fold = 'image_2/' train = glob.glob(filepath+left_fold+'/0*.jpg') train = sorted(train)", "= ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ') if (img1 in", "'.png').replace('image_2','flow_occ') if (img1 in train and len(glob.glob(flowp))>0 and ('01000' not", "sorted(train) l0_train = [] l1_train = [] flow_train = []", "train and len(glob.glob(flowp))>0 and ('01000' not in img)): l0_train.append(img) l1_train.append(img1)", "def dataloader(filepath): left_fold = 'image_2/' train = glob.glob(filepath+left_fold+'/0*.jpg') train =", "any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def dataloader(filepath): left_fold = 'image_2/'", "import os import os.path import numpy as np import pdb", ")) flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ') if (img1 in train and", "'image_2/' 
train = glob.glob(filepath+left_fold+'/0*.jpg') train = sorted(train) l0_train = []", "pdb import glob IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG',", "in train and len(glob.glob(flowp))>0 and ('01000' not in img)): l0_train.append(img)", "in train: img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ')", "= [] l1_train = [] flow_train = [] for img", "'.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', ]", "import Image import os import os.path import numpy as np", "extension in IMG_EXTENSIONS) def dataloader(filepath): left_fold = 'image_2/' train =", "flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ') if (img1 in train and len(glob.glob(flowp))>0" ]
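The shingles in the list above come from a flow-dataset loader: it globs the image_2/ frames, derives the successor frame by incrementing the zero-padded counter in the filename, maps the image path to the corresponding flow_occ ground-truth file, and keeps only pairs for which both the next frame and the flow file exist. A self-contained sketch of that pairing logic follows; the assumed directory layout (image_2/<seq>_<frame:05d>.jpg with flow under flow_occ/) and the use of os.path.exists instead of a glob check are illustrative assumptions.

# Sketch of the frame-pairing logic (assumed layout: image_2/<seq>_<frame:05d>.jpg, flow_occ/<same>.png).
import glob
import os

def dataloader(filepath):
    """Pair each frame with its successor and the matching flow_occ ground truth."""
    left_fold = 'image_2/'
    train = sorted(glob.glob(os.path.join(filepath, left_fold, '0*.jpg')))

    l0_train, l1_train, flow_train = [], [], []
    for img in train:
        # Successor frame: same sequence prefix, zero-padded frame counter + 1.
        prefix, frame = img.rsplit('_', 1)
        img1 = '%s_%05d.jpg' % (prefix, int(frame.split('.')[0]) + 1)
        # Flow ground truth: same basename, .png, under flow_occ/ instead of image_2/.
        flowp = img.replace('.jpg', '.png').replace('image_2', 'flow_occ')
        # The shingles also exclude frames whose name contains '01000' (apparently a hold-out sequence).
        if img1 in train and os.path.exists(flowp) and '01000' not in img:
            l0_train.append(img)    # reference frame
            l1_train.append(img1)   # next frame
            flow_train.append(flowp)
    return l0_train, l1_train, flow_train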
[]
[ "quote_info.get(\"currency\") } job_resp = submit_local_job(request) job_json = job_resp.json job_id =", "quote = store.save_quote(Quote(**quote_json)) workflow_quotes.append(quote.id) process_quote_info.update({\"steps\": workflow_quotes}) quote = store.save_quote(Quote(**process_quote_info)) return", "count, \"page\": page, \"limit\": limit, \"quotes\": [quote.id for quote in", "}) # loop workflow sub-process steps to get individual quotes", "random from datetime import timedelta from typing import TYPE_CHECKING from", "param in [\"inputs\", \"outputs\", \"mode\", \"response\"]: if param in request.json:", "quotes if process_type == PROCESS_WORKFLOW and weaver_config == WEAVER_CONFIGURATION_EMS: workflow_quotes", "elif process_type == PROCESS_APPLICATION: quote = store.save_quote(Quote(**process_quote_info)) quote_json = quote.json()", "import get_package_workflow_steps, get_process_location from weaver.store.base import StoreBills, StoreQuotes from weaver.utils", "a quoted process. \"\"\" quote_info = get_quote_info(request).json[\"quote\"] quote_bill_info = {", "response_schemas=sd.get_quote_list_responses) @log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description) def get_quote_list(request): \"\"\" Get list of quotes", "response_schemas=sd.post_quote_responses) @log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description) def execute_quote(request): \"\"\" Execute a quoted process.", "TYPE_CHECKING from duration import to_iso8601 from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated,", "process: instance of :class:`weaver.datatype.Process` for which to evaluate the quote.", "which to evaluate the quote. :return: dict of {price, currency,", "store.fetch_by_id(quote_id) except QuoteNotFound: raise HTTPNotFound(\"Could not find quote with specified", "return HTTPCreated(json={\"quote\": quote_json}) # error if not handled up to", "estimatedTime} values for the process quote. \"\"\" # TODO: replace", "None) return HTTPCreated(json={\"quote\": quote_json}) # error if not handled up", "execute_quote(request): \"\"\" Execute a quoted process. \"\"\" quote_info = get_quote_info(request).json[\"quote\"]", "quote.json()}) # single application quotes (ADES or EMS) elif process_type", "schema=sd.ProcessQuotesEndpoint(), response_schemas=sd.get_quote_list_responses) @sd.quotes_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON, schema=sd.QuotesEndpoint(), response_schemas=sd.get_quote_list_responses) @log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description) def get_quote_list(request):", "from provider ADES # TODO: data source mapping process_step_url =", "step in get_package_workflow_steps(process_url): # retrieve quote from provider ADES #", "from weaver.utils import get_settings, get_weaver_url from weaver.wps_restapi import swagger_definitions as", "process_quote_estimator(process): # noqa: E811 # type: (Process) -> JSON \"\"\"", "TYPE_CHECKING: from weaver.datatype import Process from weaver.typedefs import JSON LOGGER", "\"process\": process_id, \"processParameters\": process_params, \"location\": process_url, \"user\": str(request.authenticated_userid) }) #", "execution. 
import logging
import random
from datetime import timedelta
from typing import TYPE_CHECKING

from duration import to_iso8601
from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk

from weaver import sort
from weaver.config import WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS, get_weaver_configuration
from weaver.database import get_db
from weaver.datatype import Bill, Quote
from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions
from weaver.formats import OUTPUT_FORMAT_JSON
from weaver.processes.types import PROCESS_APPLICATION, PROCESS_WORKFLOW
from weaver.processes.wps_package import get_package_workflow_steps, get_process_location
from weaver.store.base import StoreBills, StoreQuotes
from weaver.utils import get_settings, get_weaver_url
from weaver.wps_restapi import swagger_definitions as sd
from weaver.wps_restapi.processes.processes import submit_local_job

if TYPE_CHECKING:
    from weaver.datatype import Process
    from weaver.typedefs import JSON

LOGGER = logging.getLogger(__name__)


def process_quote_estimator(process):  # noqa: E811
    # type: (Process) -> JSON
    """
    Simulate quote parameters for the process execution.

    :param process: instance of :class:`weaver.datatype.Process` for which to evaluate the quote.
    :return: dict of {price, currency, estimatedTime} values for the process quote.
    """
    # TODO: replace by some fancy ml technique or something?
    price = random.uniform(0, 10)  # nosec
    currency = "CAD"
    estimated_time = to_iso8601(timedelta(minutes=random.uniform(5, 60)))  # nosec
    return {"price": price, "currency": currency, "estimatedTime": estimated_time}
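

# Illustrative sketch only (not part of the Weaver API): the random estimator above could
# hypothetically be replaced by a deterministic one derived from process metadata, keeping the
# same {price, currency, estimatedTime} contract. The flat-rate values below are invented for
# the example and this helper is never called by the endpoints in this module.
def _example_flat_rate_estimator(process):
    # type: (Process) -> JSON
    """Hypothetical deterministic variant of :func:`process_quote_estimator` (example only)."""
    minutes = 15 if process.type == PROCESS_APPLICATION else 45  # assumed flat durations
    return {
        "price": 5.0,                                             # assumed flat rate
        "currency": "CAD",
        "estimatedTime": to_iso8601(timedelta(minutes=minutes)),  # ISO-8601 duration
    }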


@sd.process_quotes_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
                                schema=sd.PostProcessQuoteRequestEndpoint(), response_schemas=sd.post_quotes_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def request_quote(request):
    """
    Request a quotation for a process.
    """
    settings = get_settings(request)
    weaver_config = get_weaver_configuration(settings)

    if weaver_config not in [WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS]:
        raise HTTPBadRequest("Unsupported request for configuration '{}'.".format(weaver_config))

    process_id = request.matchdict.get("process_id")
    process_store = get_db(request).get_store("processes")
    try:
        process = process_store.fetch_by_id(process_id)
    except ProcessNotFound:
        raise HTTPNotFound("Could not find process with specified 'process_id'.")

    store = get_db(request).get_store(StoreQuotes)
    process_url = get_process_location(process_id, data_source=get_weaver_url(settings))
    process_type = process.type
    process_params = dict()
    for param in ["inputs", "outputs", "mode", "response"]:
        if param in request.json:
            process_params[param] = request.json.pop(param)
    process_quote_info = process_quote_estimator(process)
    process_quote_info.update({
        "process": process_id,
        "processParameters": process_params,
        "location": process_url,
        "user": str(request.authenticated_userid)
    })

    # loop workflow sub-process steps to get individual quotes
    if process_type == PROCESS_WORKFLOW and weaver_config == WEAVER_CONFIGURATION_EMS:
        workflow_quotes = list()

        for step in get_package_workflow_steps(process_url):
            # retrieve quote from provider ADES
            # TODO: data source mapping
            process_step_url = get_process_location(step["reference"])
            process_quote_url = "{}/quotations".format(process_step_url)
            subreq = request.copy()
            subreq.path_info = process_quote_url
            resp_json = request.invoke_subrequest(subreq).json()
            quote_json = resp_json["quote"]
            quote = store.save_quote(Quote(**quote_json))
            workflow_quotes.append(quote.id)

        process_quote_info.update({"steps": workflow_quotes})
        quote = store.save_quote(Quote(**process_quote_info))
        return HTTPCreated(json={"quote": quote.json()})

    # single application quotes (ADES or EMS)
    elif process_type == PROCESS_APPLICATION:
        quote = store.save_quote(Quote(**process_quote_info))
        quote_json = quote.json()
        quote_json.pop("steps", None)
        return HTTPCreated(json={"quote": quote_json})

    # error if not handled up to this point
    raise HTTPBadRequest("Unsupported quoting process type '{0}' on '{1}'.".format(process_type, weaver_config))
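

# Illustrative only: a hypothetical JSON body for the quotation request above. Only the
# "inputs", "outputs", "mode" and "response" keys are extracted by ``request_quote``; the
# concrete identifiers and values below are invented for the example and are not a
# documented Weaver payload.
_EXAMPLE_QUOTE_REQUEST_BODY = {
    "inputs": [{"id": "input-1", "href": "https://example.com/data.tif"}],   # hypothetical
    "outputs": [{"id": "output", "transmissionMode": "reference"}],          # hypothetical
    "mode": "async",
    "response": "document",
}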


@sd.process_quotes_service.get(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
                               schema=sd.ProcessQuotesEndpoint(), response_schemas=sd.get_quote_list_responses)
@sd.quotes_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON,
                       schema=sd.QuotesEndpoint(), response_schemas=sd.get_quote_list_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def get_quote_list(request):
    """
    Get list of quotes IDs.
    """
    page = int(request.params.get("page", "0"))
    limit = int(request.params.get("limit", "10"))
    filters = {
        "process_id": request.params.get("process", None) or request.matchdict.get("process_id", None),
        "page": page,
        "limit": limit,
        "sort": request.params.get("sort", sort.SORT_CREATED),
    }
    store = get_db(request).get_store(StoreQuotes)
    items, count = store.find_quotes(**filters)
    return HTTPOk(json={
        "count": count,
        "page": page,
        "limit": limit,
        "quotes": [quote.id for quote in items]
    })
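

# Note (illustrative): the listing endpoint above accepts the optional query parameters
# "process", "page", "limit" and "sort" (defaulting to creation date), e.g. a request such as
# ``GET /quotations?process=my-process&page=0&limit=10``, and returns a paged body of the form
# {"count": <total>, "page": 0, "limit": 10, "quotes": [<quote ids>]}. The exact route paths
# are defined by ``sd.quotes_service`` / ``sd.process_quotes_service``, so the URL shown here
# is an assumption for illustration only.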


@sd.process_quote_service.get(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
                              schema=sd.ProcessQuoteEndpoint(), response_schemas=sd.get_quote_responses)
@sd.quote_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON,
                      schema=sd.QuoteEndpoint(), response_schemas=sd.get_quote_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def get_quote_info(request):
    """
    Get quote information.
    """
    quote_id = request.matchdict.get("quote_id")
    store = get_db(request).get_store(StoreQuotes)
    try:
        quote = store.fetch_by_id(quote_id)
    except QuoteNotFound:
        raise HTTPNotFound("Could not find quote with specified 'quote_id'.")
    return HTTPOk(json={"quote": quote.json()})


@sd.process_quote_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_EXECUTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
                               schema=sd.PostProcessQuote(), response_schemas=sd.post_quote_responses)
@sd.quote_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_EXECUTE], renderer=OUTPUT_FORMAT_JSON,
                       schema=sd.PostQuote(), response_schemas=sd.post_quote_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def execute_quote(request):
    """
    Execute a quoted process.
    """
    quote_info = get_quote_info(request).json["quote"]
    quote_bill_info = {
        "quote": quote_info.get("id"),
        "price": quote_info.get("price"),
        "currency": quote_info.get("currency")
    }
    job_resp = submit_local_job(request)
    job_json = job_resp.json
    job_id = job_json.get("jobID")
    user_id = str(request.authenticated_userid)
    store = get_db(request).get_store(StoreBills)
    bill = store.save_bill(Bill(user=user_id, job=job_id, **quote_bill_info))
    job_json.update({"bill": bill.id})
    return HTTPCreated(json=job_json)
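

# Illustrative sketch only: a hypothetical client-side walk through the quotation flow
# (request a quote for a process, inspect it, then execute it to obtain a bill on the job).
# The route paths, the "weaver_url"/"process_id" arguments and the body shape are assumptions
# based on the service definitions above; verify them against the deployed API before use.
# This helper is never called by this module.
def _example_quotation_flow(weaver_url, process_id, execute_body):
    import requests  # only needed for this illustrative helper

    # request a new quote for the process (returns {"quote": {...}} on HTTP 201)
    quote_url = "{}/processes/{}/quotations".format(weaver_url, process_id)
    quote = requests.post(quote_url, json=execute_body).json()["quote"]

    # fetch the stored quote details ({price, currency, estimatedTime, ...})
    detail = requests.get("{}/quotations/{}".format(weaver_url, quote["id"])).json()["quote"]

    # executing the quote submits the job and attaches a bill id to the returned job status
    job = requests.post("{}/quotations/{}".format(weaver_url, quote["id"]), json=execute_body).json()
    return detail, job.get("bill")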
:param process:", "weaver.wps_restapi import swagger_definitions as sd from weaver.wps_restapi.processes.processes import submit_local_job if", "= int(request.params.get(\"limit\", \"10\")) filters = { \"process_id\": request.params.get(\"process\", None) or", "quote_json.pop(\"steps\", None) return HTTPCreated(json={\"quote\": quote_json}) # error if not handled", "= request.copy() subreq.path_info = process_quote_url resp_json = request.invoke_subrequest(subreq).json() quote_json =", "in request.json: process_params[param] = request.json.pop(param) process_quote_info = process_quote_estimator(process) process_quote_info.update({ \"process\":", "single application quotes (ADES or EMS) elif process_type == PROCESS_APPLICATION:", "schema=sd.PostQuote(), response_schemas=sd.post_quote_responses) @log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description) def execute_quote(request): \"\"\" Execute a quoted", "PROCESS_WORKFLOW from weaver.processes.wps_package import get_package_workflow_steps, get_process_location from weaver.store.base import StoreBills,", "# TODO: data source mapping process_step_url = get_process_location(step[\"reference\"]) process_quote_url =", "get_weaver_configuration from weaver.database import get_db from weaver.datatype import Bill, Quote", "data source mapping process_step_url = get_process_location(step[\"reference\"]) process_quote_url = \"{}/quotations\".format(process_step_url) subreq", "store.save_quote(Quote(**process_quote_info)) quote_json = quote.json() quote_json.pop(\"steps\", None) return HTTPCreated(json={\"quote\": quote_json}) #", "typing import TYPE_CHECKING from duration import to_iso8601 from pyramid.httpexceptions import", "weaver_config == WEAVER_CONFIGURATION_EMS: workflow_quotes = list() for step in get_package_workflow_steps(process_url):", "job_json = job_resp.json job_id = job_json.get(\"jobID\") user_id = str(request.authenticated_userid) store", "from weaver.datatype import Process from weaver.typedefs import JSON LOGGER =", "get_weaver_configuration(settings) if weaver_config not in [WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS]: raise HTTPBadRequest(\"Unsupported request", "logging.getLogger(__name__) def process_quote_estimator(process): # noqa: E811 # type: (Process) ->", "request.invoke_subrequest(subreq).json() quote_json = resp_json[\"quote\"] quote = store.save_quote(Quote(**quote_json)) workflow_quotes.append(quote.id) process_quote_info.update({\"steps\": workflow_quotes})", "message=sd.InternalServerErrorResponseSchema.description) def execute_quote(request): \"\"\" Execute a quoted process. \"\"\" quote_info", "= process_store.fetch_by_id(process_id) except ProcessNotFound: raise HTTPNotFound(\"Could not find process with", "to_iso8601(timedelta(minutes=random.uniform(5, 60))) # nosec return {\"price\": price, \"currency\": currency, \"estimatedTime\":", "60))) # nosec return {\"price\": price, \"currency\": currency, \"estimatedTime\": estimated_time}", "values for the process quote. 
\"\"\" # TODO: replace by", "not handled up to this point raise HTTPBadRequest(\"Unsupported quoting process", "get_process_location(step[\"reference\"]) process_quote_url = \"{}/quotations\".format(process_step_url) subreq = request.copy() subreq.path_info = process_quote_url", "request.matchdict.get(\"quote_id\") store = get_db(request).get_store(StoreQuotes) try: quote = store.fetch_by_id(quote_id) except QuoteNotFound:", "type: (Process) -> JSON \"\"\" Simulate quote parameters for the", "or something? price = random.uniform(0, 10) # nosec currency =", "= get_db(request).get_store(StoreQuotes) items, count = store.find_quotes(**filters) return HTTPOk(json={ \"count\": count,", "quote_json}) # error if not handled up to this point", "except QuoteNotFound: raise HTTPNotFound(\"Could not find quote with specified 'quote_id'.\")", "\"processParameters\": process_params, \"location\": process_url, \"user\": str(request.authenticated_userid) }) # loop workflow", "store.save_quote(Quote(**process_quote_info)) return HTTPCreated(json={\"quote\": quote.json()}) # single application quotes (ADES or", "information. \"\"\" quote_id = request.matchdict.get(\"quote_id\") store = get_db(request).get_store(StoreQuotes) try: quote", "schema=sd.PostProcessQuoteRequestEndpoint(), response_schemas=sd.post_quotes_responses) @log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description) def request_quote(request): \"\"\" Request a quotation", "import to_iso8601 from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk from", "store = get_db(request).get_store(StoreQuotes) try: quote = store.fetch_by_id(quote_id) except QuoteNotFound: raise", "of :class:`weaver.datatype.Process` for which to evaluate the quote. :return: dict", "for configuration '{}'.\".format(weaver_config)) process_id = request.matchdict.get(\"process_id\") process_store = get_db(request).get_store(\"processes\") try:", "job_id = job_json.get(\"jobID\") user_id = str(request.authenticated_userid) store = get_db(request).get_store(StoreBills) bill", "request.params.get(\"sort\", sort.SORT_CREATED), } store = get_db(request).get_store(StoreQuotes) items, count = store.find_quotes(**filters)", "TODO: data source mapping process_step_url = get_process_location(step[\"reference\"]) process_quote_url = \"{}/quotations\".format(process_step_url)", "= process_quote_estimator(process) process_quote_info.update({ \"process\": process_id, \"processParameters\": process_params, \"location\": process_url, \"user\":", "store = get_db(request).get_store(StoreQuotes) items, count = store.find_quotes(**filters) return HTTPOk(json={ \"count\":", "Bill, Quote from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions from weaver.formats", "process_id, \"processParameters\": process_params, \"location\": process_url, \"user\": str(request.authenticated_userid) }) # loop", "quoted process. \"\"\" quote_info = get_quote_info(request).json[\"quote\"] quote_bill_info = { \"quote\":", "get_weaver_url from weaver.wps_restapi import swagger_definitions as sd from weaver.wps_restapi.processes.processes import", "pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk from weaver import sort", "\"currency\": quote_info.get(\"currency\") } job_resp = submit_local_job(request) job_json = job_resp.json job_id", "IDs. 
\"\"\" page = int(request.params.get(\"page\", \"0\")) limit = int(request.params.get(\"limit\", \"10\"))", "schema=sd.QuoteEndpoint(), response_schemas=sd.get_quote_responses) @log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description) def get_quote_info(request): \"\"\" Get quote information.", "from datetime import timedelta from typing import TYPE_CHECKING from duration", "= { \"quote\": quote_info.get(\"id\"), \"price\": quote_info.get(\"price\"), \"currency\": quote_info.get(\"currency\") } job_resp", "\"page\": page, \"limit\": limit, \"sort\": request.params.get(\"sort\", sort.SORT_CREATED), } store =", "= { \"process_id\": request.params.get(\"process\", None) or request.matchdict.get(\"process_id\", None), \"page\": page," ]
[ "header=None): if header is None: base = st else: col,", "Div APP_URL = os.environ[\"APP_URL\"] STRAVA_CLIENT_ID = os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET = os.environ[\"STRAVA_CLIENT_SECRET\"]", "style=\\\"color:{STRAVA_ORANGE};\\\">View on Strava</a>\", unsafe_allow_html=True ) return activity @st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True)", "30. Increment this field to go to the next page.\",", "of pages.\") st.stop() default_activity = {\"name\": DEFAULT_ACTIVITY_LABEL, \"start_date_local\": \"\"} activity", "is None: base = st else: col, _, _, _", "- {date_string} ({human_readable_date})\" def select_strava_activity(auth): col1, col2 = st.beta_columns([1, 3])", "st.experimental_set_query_params(session=authorization_code) return strava_auth def header(): col1, col2, col3 = st.beta_columns(3)", "url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={ \"page\": page, }, headers={ \"Authorization\": f\"Bearer {access_token}\", },", "alt=\\\"strava login\\\" src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\" f\"</a>\" ), unsafe_allow_html=True, ) def logout_header(header=None):", "activity\", options=[default_activity] + activities, format_func=activity_label, ) if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL:", "chunks of 30. Increment this field to go to the", "params={ \"client_id\": STRAVA_CLIENT_ID, \"redirect_uri\": APP_URL, \"response_type\": \"code\", \"approval_prompt\": \"auto\", \"scope\":", "= header base = button with col1: powered_by_strava_logo() base64_image =", "powered_by_strava_logo() base64_image = load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown( ( f\"<a href=\\\"{strava_authorization_url}\\\">\" f\" <img", "sweat from bokeh.models.widgets import Div APP_URL = os.environ[\"APP_URL\"] STRAVA_CLIENT_ID =", "= st else: _, col2, _, button = header base", "= st.selectbox( label=\"Select an activity\", options=[default_activity] + activities, format_func=activity_label, )", "def logged_in_title(strava_auth, header=None): if header is None: base = st", "with open(image_path, \"rb\") as f: contents = f.read() return base64.b64encode(contents).decode(\"utf-8\")", "contents = f.read() return base64.b64encode(contents).decode(\"utf-8\") def powered_by_strava_logo(): base64_image = load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\")", "open(image_path, \"rb\") as f: contents = f.read() return base64.b64encode(contents).decode(\"utf-8\") def", "= f\"https://www.strava.com/activities/{activity['id']}\" st.markdown( f\"<a href=\\\"{activity_url}\\\" style=\\\"color:{STRAVA_ORANGE};\\\">View on Strava</a>\", unsafe_allow_html=True )", "of 30. 
Increment this field to go to the next", "= button with col2: powered_by_strava_logo() if base.button(\"Log out\"): js =", "reload and try again\") st.experimental_set_query_params() st.stop() return strava_auth = response.json()", "format_func=activity_label, ) if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: st.write(\"No activity selected\") st.stop()", "STRAVA_AUTHORIZATION_URL = \"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL = \"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL = \"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE", "st.experimental_set_query_params() st.stop() return strava_auth = response.json() return strava_auth def authenticate(header=None,", "'{APP_URL}'\" html = f\"<img src onerror=\\\"{js}\\\">\" div = Div(text=html) st.bokeh_chart(div)", "get_activities(auth=auth, page=page) if not activities: st.info(\"This Strava account has no", "st.stop() default_activity = {\"name\": DEFAULT_ACTIVITY_LABEL, \"start_date_local\": \"\"} activity = st.selectbox(", "= f.read() return base64.b64encode(contents).decode(\"utf-8\") def powered_by_strava_logo(): base64_image = load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown(", "col1: page = st.number_input( label=\"Activities page\", min_value=1, help=\"The Strava API", "= start_date.humanize(granularity=[\"day\"]) date_string = start_date.format(\"YYYY-MM-DD\") return f\"{activity['name']} - {date_string} ({human_readable_date})\"", "col3: strava_button = st.empty() return col1, col2, col3, strava_button @st.cache(show_spinner=False)", "activities, format_func=activity_label, ) if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: st.write(\"No activity selected\")", "def login_header(header=None): strava_authorization_url = authorization_url() if header is None: base", "= st.empty() return col1, col2, col3, strava_button @st.cache(show_spinner=False) def get_activities(auth,", "if header is None: base = st else: _, col2,", "STRAVA_CLIENT_ID = os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET = os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL = \"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL", "STRAVA_CLIENT_SECRET = os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL = \"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL = \"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL", "auth[\"access_token\"] response = httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={ \"page\": page, }, headers={", "activities = get_activities(auth=auth, page=page) if not activities: st.info(\"This Strava account", "col1, col2, col3, strava_button @st.cache(show_spinner=False) def get_activities(auth, page=1): access_token =", "else: _, col2, _, button = header base = button", "arrow.get(activity[\"start_date_local\"]) human_readable_date = start_date.humanize(granularity=[\"day\"]) date_string = start_date.format(\"YYYY-MM-DD\") return f\"{activity['name']} -", "strava_auth = response.json() return strava_auth def authenticate(header=None, stop_if_unauthenticated=True): query_params =", "\"code\", \"approval_prompt\": \"auto\", \"scope\": \"activity:read_all\" } ) return request.url def", "st.beta_columns([1, 3]) with col1: page = st.number_input( label=\"Activities page\", min_value=1,", "\"code\": authorization_code, \"grant_type\": \"authorization_code\", } ) try: response.raise_for_status() except httpx.HTTPStatusError:", "options=[default_activity] + activities, format_func=activity_label, ) if activity[\"name\"] == 
DEFAULT_ACTIVITY_LABEL: st.write(\"No", "= arrow.get(activity[\"start_date_local\"]) human_readable_date = start_date.humanize(granularity=[\"day\"]) date_string = start_date.format(\"YYYY-MM-DD\") return f\"{activity['name']}", "= auth[\"access_token\"] response = httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={ \"page\": page, },", "= load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown( f'<img src=\"data:image/png;base64,{base64_image}\" width=\"100%\" alt=\"powered by strava\">', unsafe_allow_html=True,", "st.selectbox( label=\"Select an activity\", options=[default_activity] + activities, format_func=activity_label, ) if", "activity @st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True) def download_activity(activity, strava_auth): with st.spinner(f\"Downloading activity", "strava_auth = exchange_authorization_code(authorization_code) logged_in_title(strava_auth, header) st.experimental_set_query_params(session=authorization_code) return strava_auth def header():", "\"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE = \"#fc4c02\" @st.cache(show_spinner=False) def load_image_as_base64(image_path): with open(image_path, \"rb\")", "out of pages.\") st.stop() default_activity = {\"name\": DEFAULT_ACTIVITY_LABEL, \"start_date_local\": \"\"}", "\"approval_prompt\": \"auto\", \"scope\": \"activity:read_all\" } ) return request.url def login_header(header=None):", "button = header base = button with col1: powered_by_strava_logo() base64_image", "Div(text=html) st.bokeh_chart(div) def logged_in_title(strava_auth, header=None): if header is None: base", "def header(): col1, col2, col3 = st.beta_columns(3) with col3: strava_button", "st import sweat from bokeh.models.widgets import Div APP_URL = os.environ[\"APP_URL\"]", "col2, col3 = st.beta_columns(3) with col3: strava_button = st.empty() return", "src onerror=\\\"{js}\\\">\" div = Div(text=html) st.bokeh_chart(div) def logged_in_title(strava_auth, header=None): if", ") return response.json() def activity_label(activity): if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: return", "return activity @st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True) def download_activity(activity, strava_auth): with st.spinner(f\"Downloading", "f\" <img alt=\\\"strava login\\\" src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\" f\"</a>\" ), unsafe_allow_html=True, )", "\"activity:read_all\" } ) return request.url def login_header(header=None): strava_authorization_url = authorization_url()", "activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: st.write(\"No activity selected\") st.stop() return activity_url =", "os import arrow import httpx import streamlit as st import", "f\"{activity['name']} - {date_string} ({human_readable_date})\" def select_strava_activity(auth): col1, col2 = st.beta_columns([1,", "col1: powered_by_strava_logo() base64_image = load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown( ( f\"<a href=\\\"{strava_authorization_url}\\\">\" f\"", "DEFAULT_ACTIVITY_LABEL, \"start_date_local\": \"\"} activity = st.selectbox( label=\"Select an activity\", options=[default_activity]", "src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\" f\"</a>\" ), unsafe_allow_html=True, ) def logout_header(header=None): if header", "base64 import os import arrow import httpx import streamlit as", "import base64 import os import arrow import httpx import streamlit", "st.stop() return activity_url = 
f\"https://www.strava.com/activities/{activity['id']}\" st.markdown( f\"<a href=\\\"{activity_url}\\\" style=\\\"color:{STRAVA_ORANGE};\\\">View on", "and try again\") st.experimental_set_query_params() st.stop() return strava_auth = response.json() return", "col2: powered_by_strava_logo() if base.button(\"Log out\"): js = f\"window.location.href = '{APP_URL}'\"", "[None])[0] if authorization_code is None: login_header(header=header) if stop_if_unauthenticated: st.stop() return", "else: logout_header(header=header) strava_auth = exchange_authorization_code(authorization_code) logged_in_title(strava_auth, header) st.experimental_set_query_params(session=authorization_code) return strava_auth", "bokeh.models.widgets import Div APP_URL = os.environ[\"APP_URL\"] STRAVA_CLIENT_ID = os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET", ") def logout_header(header=None): if header is None: base = st", "STRAVA_CLIENT_ID, \"client_secret\": STRAVA_CLIENT_SECRET, \"code\": authorization_code, \"grant_type\": \"authorization_code\", } ) try:", "\"auto\", \"scope\": \"activity:read_all\" } ) return request.url def login_header(header=None): strava_authorization_url", "= \"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE = \"#fc4c02\" @st.cache(show_spinner=False) def load_image_as_base64(image_path): with open(image_path,", "base = st else: _, col2, _, button = header", "powered_by_strava_logo() if base.button(\"Log out\"): js = f\"window.location.href = '{APP_URL}'\" html", "_ = header base = col first_name = strava_auth[\"athlete\"][\"firstname\"] last_name", "STRAVA_CLIENT_SECRET, \"code\": authorization_code, \"grant_type\": \"authorization_code\", } ) try: response.raise_for_status() except", "= load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown( ( f\"<a href=\\\"{strava_authorization_url}\\\">\" f\" <img alt=\\\"strava login\\\"", "start_date.humanize(granularity=[\"day\"]) date_string = start_date.format(\"YYYY-MM-DD\") return f\"{activity['name']} - {date_string} ({human_readable_date})\" def", "\"#fc4c02\" @st.cache(show_spinner=False) def load_image_as_base64(image_path): with open(image_path, \"rb\") as f: contents", "def get_activities(auth, page=1): access_token = auth[\"access_token\"] response = httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\",", "logged_in_title(strava_auth, header=None): if header is None: base = st else:", "= st else: col, _, _, _ = header base", "None: authorization_code = query_params.get(\"session\", [None])[0] if authorization_code is None: login_header(header=header)", "_, button = header base = button with col2: powered_by_strava_logo()", "method=\"GET\", url=STRAVA_AUTHORIZATION_URL, params={ \"client_id\": STRAVA_CLIENT_ID, \"redirect_uri\": APP_URL, \"response_type\": \"code\", \"approval_prompt\":", "next page.\", ) with col2: activities = get_activities(auth=auth, page=page) if", "Strava. 
Please reload and try again\") st.experimental_set_query_params() st.stop() return strava_auth", "header is None: base = st else: col1, _, _,", "you ran out of pages.\") st.stop() default_activity = {\"name\": DEFAULT_ACTIVITY_LABEL,", "select_strava_activity(auth): col1, col2 = st.beta_columns([1, 3]) with col1: page =", "st else: col, _, _, _ = header base =", "3]) with col1: page = st.number_input( label=\"Activities page\", min_value=1, help=\"The", "page=page) if not activities: st.info(\"This Strava account has no activities", "if authorization_code is None: login_header(header=header) if stop_if_unauthenticated: st.stop() return else:", "html = f\"<img src onerror=\\\"{js}\\\">\" div = Div(text=html) st.bokeh_chart(div) def", "return else: logout_header(header=header) strava_auth = exchange_authorization_code(authorization_code) logged_in_title(strava_auth, header) st.experimental_set_query_params(session=authorization_code) return", "\"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL = \"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE = \"#fc4c02\" @st.cache(show_spinner=False) def load_image_as_base64(image_path):", "= authorization_url() if header is None: base = st else:", "col2 = st.beta_columns([1, 3]) with col1: page = st.number_input( label=\"Activities", "an activity\", options=[default_activity] + activities, format_func=activity_label, ) if activity[\"name\"] ==", "your activities in chunks of 30. Increment this field to", "= header base = button with col2: powered_by_strava_logo() if base.button(\"Log", "def exchange_authorization_code(authorization_code): response = httpx.post( url=\"https://www.strava.com/oauth/token\", json={ \"client_id\": STRAVA_CLIENT_ID, \"client_secret\":", "base = st else: col1, _, _, button = header", "httpx.Request( method=\"GET\", url=STRAVA_AUTHORIZATION_URL, params={ \"client_id\": STRAVA_CLIENT_ID, \"redirect_uri\": APP_URL, \"response_type\": \"code\",", ") return request.url def login_header(header=None): strava_authorization_url = authorization_url() if header", "header base = col first_name = strava_auth[\"athlete\"][\"firstname\"] last_name = strava_auth[\"athlete\"][\"lastname\"]", "f: contents = f.read() return base64.b64encode(contents).decode(\"utf-8\") def powered_by_strava_logo(): base64_image =", "base = col first_name = strava_auth[\"athlete\"][\"firstname\"] last_name = strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome,", "authorization_code = query_params.get(\"code\", [None])[0] if authorization_code is None: authorization_code =", "== DEFAULT_ACTIVITY_LABEL: st.write(\"No activity selected\") st.stop() return activity_url = f\"https://www.strava.com/activities/{activity['id']}\"", "import arrow import httpx import streamlit as st import sweat", "authorization_code, \"grant_type\": \"authorization_code\", } ) try: response.raise_for_status() except httpx.HTTPStatusError: st.error(\"Something", "query_params.get(\"session\", [None])[0] if authorization_code is None: login_header(header=header) if stop_if_unauthenticated: st.stop()", "load_image_as_base64(image_path): with open(image_path, \"rb\") as f: contents = f.read() return", "strava\">', unsafe_allow_html=True, ) def authorization_url(): request = httpx.Request( method=\"GET\", url=STRAVA_AUTHORIZATION_URL,", "{first_name} {last_name}!*\") @st.cache(show_spinner=False, suppress_st_warning=True) def exchange_authorization_code(authorization_code): response = httpx.post( url=\"https://www.strava.com/oauth/token\",", "= strava_auth[\"athlete\"][\"firstname\"] 
last_name = strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome, {first_name} {last_name}!*\") @st.cache(show_spinner=False, suppress_st_warning=True)", "query_params.get(\"code\", [None])[0] if authorization_code is None: authorization_code = query_params.get(\"session\", [None])[0]", "arrow import httpx import streamlit as st import sweat from", "= httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={ \"page\": page, }, headers={ \"Authorization\": f\"Bearer", "= start_date.format(\"YYYY-MM-DD\") return f\"{activity['name']} - {date_string} ({human_readable_date})\" def select_strava_activity(auth): col1,", "as st import sweat from bokeh.models.widgets import Div APP_URL =", "APP_URL = os.environ[\"APP_URL\"] STRAVA_CLIENT_ID = os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET = os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL", "return strava_auth def authenticate(header=None, stop_if_unauthenticated=True): query_params = st.experimental_get_query_params() authorization_code =", "not activities: st.info(\"This Strava account has no activities or you", "st.markdown( f\"<a href=\\\"{activity_url}\\\" style=\\\"color:{STRAVA_ORANGE};\\\">View on Strava</a>\", unsafe_allow_html=True ) return activity", "ran out of pages.\") st.stop() default_activity = {\"name\": DEFAULT_ACTIVITY_LABEL, \"start_date_local\":", "max_entries=30, allow_output_mutation=True) def download_activity(activity, strava_auth): with st.spinner(f\"Downloading activity \\\"{activity['name']}\\\"...\"): return", "if base.button(\"Log out\"): js = f\"window.location.href = '{APP_URL}'\" html =", "activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: return \"\" start_date = arrow.get(activity[\"start_date_local\"]) human_readable_date =", "button with col1: powered_by_strava_logo() base64_image = load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown( ( f\"<a", "None: base = st else: col, _, _, _ =", "\"grant_type\": \"authorization_code\", } ) try: response.raise_for_status() except httpx.HTTPStatusError: st.error(\"Something went", "has no activities or you ran out of pages.\") st.stop()", "}, ) return response.json() def activity_label(activity): if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL:", "unsafe_allow_html=True, ) def logout_header(header=None): if header is None: base =", "authorization_code = query_params.get(\"session\", [None])[0] if authorization_code is None: login_header(header=header) if", "response = httpx.post( url=\"https://www.strava.com/oauth/token\", json={ \"client_id\": STRAVA_CLIENT_ID, \"client_secret\": STRAVA_CLIENT_SECRET, \"code\":", "activity = st.selectbox( label=\"Select an activity\", options=[default_activity] + activities, format_func=activity_label,", "label=\"Activities page\", min_value=1, help=\"The Strava API returns your activities in", "= os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET = os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL = \"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL =", "alt=\"powered by strava\">', unsafe_allow_html=True, ) def authorization_url(): request = httpx.Request(", "== DEFAULT_ACTIVITY_LABEL: return \"\" start_date = arrow.get(activity[\"start_date_local\"]) human_readable_date = start_date.humanize(granularity=[\"day\"])", "base = st else: col, _, _, _ = header", "date_string = start_date.format(\"YYYY-MM-DD\") return f\"{activity['name']} - {date_string} ({human_readable_date})\" def select_strava_activity(auth):", "href=\\\"{activity_url}\\\" 
style=\\\"color:{STRAVA_ORANGE};\\\">View on Strava</a>\", unsafe_allow_html=True ) return activity @st.cache(show_spinner=False, max_entries=30,", "= \"#fc4c02\" @st.cache(show_spinner=False) def load_image_as_base64(image_path): with open(image_path, \"rb\") as f:", "<reponame>AartGoossens/streamlit-activity-viewer import base64 import os import arrow import httpx import", "os.environ[\"APP_URL\"] STRAVA_CLIENT_ID = os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET = os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL = \"https://www.strava.com/oauth/authorize\"", "= \"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL = \"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE = \"#fc4c02\" @st.cache(show_spinner=False) def", "= st.beta_columns(3) with col3: strava_button = st.empty() return col1, col2,", "streamlit as st import sweat from bokeh.models.widgets import Div APP_URL", "header is None: base = st else: col, _, _,", "strava_button @st.cache(show_spinner=False) def get_activities(auth, page=1): access_token = auth[\"access_token\"] response =", "start_date = arrow.get(activity[\"start_date_local\"]) human_readable_date = start_date.humanize(granularity=[\"day\"]) date_string = start_date.format(\"YYYY-MM-DD\") return", "to the next page.\", ) with col2: activities = get_activities(auth=auth,", "no activities or you ran out of pages.\") st.stop() default_activity", ") def authorization_url(): request = httpx.Request( method=\"GET\", url=STRAVA_AUTHORIZATION_URL, params={ \"client_id\":", "is None: base = st else: col1, _, _, button", "width=\\\"100%\\\">\" f\"</a>\" ), unsafe_allow_html=True, ) def logout_header(header=None): if header is", "try again\") st.experimental_set_query_params() st.stop() return strava_auth = response.json() return strava_auth", "\"page\": page, }, headers={ \"Authorization\": f\"Bearer {access_token}\", }, ) return", "stop_if_unauthenticated=True): query_params = st.experimental_get_query_params() authorization_code = query_params.get(\"code\", [None])[0] if authorization_code", "f\"Bearer {access_token}\", }, ) return response.json() def activity_label(activity): if activity[\"name\"]", "st else: _, col2, _, button = header base =", "st.markdown( f'<img src=\"data:image/png;base64,{base64_image}\" width=\"100%\" alt=\"powered by strava\">', unsafe_allow_html=True, ) def", "f\"https://www.strava.com/activities/{activity['id']}\" st.markdown( f\"<a href=\\\"{activity_url}\\\" style=\\\"color:{STRAVA_ORANGE};\\\">View on Strava</a>\", unsafe_allow_html=True ) return", "f.read() return base64.b64encode(contents).decode(\"utf-8\") def powered_by_strava_logo(): base64_image = load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown( f'<img", "authorization_url(): request = httpx.Request( method=\"GET\", url=STRAVA_AUTHORIZATION_URL, params={ \"client_id\": STRAVA_CLIENT_ID, \"redirect_uri\":", "f\"<a href=\\\"{strava_authorization_url}\\\">\" f\" <img alt=\\\"strava login\\\" src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\" f\"</a>\" ),", "st.number_input( label=\"Activities page\", min_value=1, help=\"The Strava API returns your activities", "import sweat from bokeh.models.widgets import Div APP_URL = os.environ[\"APP_URL\"] STRAVA_CLIENT_ID", "url=\"https://www.strava.com/oauth/token\", json={ \"client_id\": STRAVA_CLIENT_ID, \"client_secret\": STRAVA_CLIENT_SECRET, \"code\": authorization_code, \"grant_type\": \"authorization_code\",", "header base = button with col2: powered_by_strava_logo() if 
base.button(\"Log out\"):", "with Strava. Please reload and try again\") st.experimental_set_query_params() st.stop() return", "st.experimental_get_query_params() authorization_code = query_params.get(\"code\", [None])[0] if authorization_code is None: authorization_code", "col, _, _, _ = header base = col first_name", "if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: return \"\" start_date = arrow.get(activity[\"start_date_local\"]) human_readable_date", "is None: base = st else: _, col2, _, button", "= st.beta_columns([1, 3]) with col1: page = st.number_input( label=\"Activities page\",", "logout_header(header=None): if header is None: base = st else: _,", "DEFAULT_ACTIVITY_LABEL: st.write(\"No activity selected\") st.stop() return activity_url = f\"https://www.strava.com/activities/{activity['id']}\" st.markdown(", ") return activity @st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True) def download_activity(activity, strava_auth): with", "on Strava</a>\", unsafe_allow_html=True ) return activity @st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True) def", "def activity_label(activity): if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: return \"\" start_date =", "\"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL = \"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL = \"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE = \"#fc4c02\"", "col.markdown(f\"*Welcome, {first_name} {last_name}!*\") @st.cache(show_spinner=False, suppress_st_warning=True) def exchange_authorization_code(authorization_code): response = httpx.post(", "return base64.b64encode(contents).decode(\"utf-8\") def powered_by_strava_logo(): base64_image = load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown( f'<img src=\"data:image/png;base64,{base64_image}\"", "return \"\" start_date = arrow.get(activity[\"start_date_local\"]) human_readable_date = start_date.humanize(granularity=[\"day\"]) date_string =", "= Div(text=html) st.bokeh_chart(div) def logged_in_title(strava_auth, header=None): if header is None:", "json={ \"client_id\": STRAVA_CLIENT_ID, \"client_secret\": STRAVA_CLIENT_SECRET, \"code\": authorization_code, \"grant_type\": \"authorization_code\", }", "by strava\">', unsafe_allow_html=True, ) def authorization_url(): request = httpx.Request( method=\"GET\",", "button with col2: powered_by_strava_logo() if base.button(\"Log out\"): js = f\"window.location.href", "f\"<a href=\\\"{activity_url}\\\" style=\\\"color:{STRAVA_ORANGE};\\\">View on Strava</a>\", unsafe_allow_html=True ) return activity @st.cache(show_spinner=False,", "_, _ = header base = col first_name = strava_auth[\"athlete\"][\"firstname\"]", "except httpx.HTTPStatusError: st.error(\"Something went wrong while authenticating with Strava. 
Please", "None: login_header(header=header) if stop_if_unauthenticated: st.stop() return else: logout_header(header=header) strava_auth =", "if authorization_code is None: authorization_code = query_params.get(\"session\", [None])[0] if authorization_code", "APP_URL, \"response_type\": \"code\", \"approval_prompt\": \"auto\", \"scope\": \"activity:read_all\" } ) return", "href=\\\"{strava_authorization_url}\\\">\" f\" <img alt=\\\"strava login\\\" src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\" f\"</a>\" ), unsafe_allow_html=True,", "with col2: powered_by_strava_logo() if base.button(\"Log out\"): js = f\"window.location.href =", "= col first_name = strava_auth[\"athlete\"][\"firstname\"] last_name = strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome, {first_name}", "f'<img src=\"data:image/png;base64,{base64_image}\" width=\"100%\" alt=\"powered by strava\">', unsafe_allow_html=True, ) def authorization_url():", "the next page.\", ) with col2: activities = get_activities(auth=auth, page=page)", "activities or you ran out of pages.\") st.stop() default_activity =", "os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET = os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL = \"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL = \"https://www.strava.com/api/v3\"", "authorization_code is None: login_header(header=header) if stop_if_unauthenticated: st.stop() return else: logout_header(header=header)", "with col1: page = st.number_input( label=\"Activities page\", min_value=1, help=\"The Strava", "params={ \"page\": page, }, headers={ \"Authorization\": f\"Bearer {access_token}\", }, )", "return response.json() def activity_label(activity): if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: return \"\"", "from bokeh.models.widgets import Div APP_URL = os.environ[\"APP_URL\"] STRAVA_CLIENT_ID = os.environ[\"STRAVA_CLIENT_ID\"]", "returns your activities in chunks of 30. Increment this field", "if not activities: st.info(\"This Strava account has no activities or", "col3 = st.beta_columns(3) with col3: strava_button = st.empty() return col1,", "this field to go to the next page.\", ) with", "Increment this field to go to the next page.\", )", "Strava API returns your activities in chunks of 30. 
Increment", "@st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True) def download_activity(activity, strava_auth): with st.spinner(f\"Downloading activity \\\"{activity['name']}\\\"...\"):", "get_activities(auth, page=1): access_token = auth[\"access_token\"] response = httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={", "request.url def login_header(header=None): strava_authorization_url = authorization_url() if header is None:", "STRAVA_ORANGE = \"#fc4c02\" @st.cache(show_spinner=False) def load_image_as_base64(image_path): with open(image_path, \"rb\") as", "), unsafe_allow_html=True, ) def logout_header(header=None): if header is None: base", "@st.cache(show_spinner=False, suppress_st_warning=True) def exchange_authorization_code(authorization_code): response = httpx.post( url=\"https://www.strava.com/oauth/token\", json={ \"client_id\":", "last_name = strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome, {first_name} {last_name}!*\") @st.cache(show_spinner=False, suppress_st_warning=True) def exchange_authorization_code(authorization_code):", "strava_button = st.empty() return col1, col2, col3, strava_button @st.cache(show_spinner=False) def", "logged_in_title(strava_auth, header) st.experimental_set_query_params(session=authorization_code) return strava_auth def header(): col1, col2, col3", "import streamlit as st import sweat from bokeh.models.widgets import Div", "col1, col2 = st.beta_columns([1, 3]) with col1: page = st.number_input(", "base = button with col1: powered_by_strava_logo() base64_image = load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown(", "def logout_header(header=None): if header is None: base = st else:", "({human_readable_date})\" def select_strava_activity(auth): col1, col2 = st.beta_columns([1, 3]) with col1:", "allow_output_mutation=True) def download_activity(activity, strava_auth): with st.spinner(f\"Downloading activity \\\"{activity['name']}\\\"...\"): return sweat.read_strava(activity[\"id\"],", "= os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL = \"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL = \"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL =", "div = Div(text=html) st.bokeh_chart(div) def logged_in_title(strava_auth, header=None): if header is", "suppress_st_warning=True) def exchange_authorization_code(authorization_code): response = httpx.post( url=\"https://www.strava.com/oauth/token\", json={ \"client_id\": STRAVA_CLIENT_ID,", "base.button(\"Log out\"): js = f\"window.location.href = '{APP_URL}'\" html = f\"<img", "DEFAULT_ACTIVITY_LABEL: return \"\" start_date = arrow.get(activity[\"start_date_local\"]) human_readable_date = start_date.humanize(granularity=[\"day\"]) date_string", "= query_params.get(\"code\", [None])[0] if authorization_code is None: authorization_code = query_params.get(\"session\",", "try: response.raise_for_status() except httpx.HTTPStatusError: st.error(\"Something went wrong while authenticating with", "st.beta_columns(3) with col3: strava_button = st.empty() return col1, col2, col3,", "= '{APP_URL}'\" html = f\"<img src onerror=\\\"{js}\\\">\" div = Div(text=html)", "request = httpx.Request( method=\"GET\", url=STRAVA_AUTHORIZATION_URL, params={ \"client_id\": STRAVA_CLIENT_ID, \"redirect_uri\": APP_URL,", "st.stop() return else: logout_header(header=header) strava_auth = exchange_authorization_code(authorization_code) logged_in_title(strava_auth, header) 
st.experimental_set_query_params(session=authorization_code)", "httpx.post( url=\"https://www.strava.com/oauth/token\", json={ \"client_id\": STRAVA_CLIENT_ID, \"client_secret\": STRAVA_CLIENT_SECRET, \"code\": authorization_code, \"grant_type\":", "{access_token}\", }, ) return response.json() def activity_label(activity): if activity[\"name\"] ==", "unsafe_allow_html=True ) return activity @st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True) def download_activity(activity, strava_auth):", "\"authorization_code\", } ) try: response.raise_for_status() except httpx.HTTPStatusError: st.error(\"Something went wrong", "base64.b64encode(contents).decode(\"utf-8\") def powered_by_strava_logo(): base64_image = load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown( f'<img src=\"data:image/png;base64,{base64_image}\" width=\"100%\"", "unsafe_allow_html=True, ) def authorization_url(): request = httpx.Request( method=\"GET\", url=STRAVA_AUTHORIZATION_URL, params={", "= query_params.get(\"session\", [None])[0] if authorization_code is None: login_header(header=header) if stop_if_unauthenticated:", "header(): col1, col2, col3 = st.beta_columns(3) with col3: strava_button =", "in chunks of 30. Increment this field to go to", "} ) return request.url def login_header(header=None): strava_authorization_url = authorization_url() if", "\"rb\") as f: contents = f.read() return base64.b64encode(contents).decode(\"utf-8\") def powered_by_strava_logo():", "again\") st.experimental_set_query_params() st.stop() return strava_auth = response.json() return strava_auth def", "exchange_authorization_code(authorization_code): response = httpx.post( url=\"https://www.strava.com/oauth/token\", json={ \"client_id\": STRAVA_CLIENT_ID, \"client_secret\": STRAVA_CLIENT_SECRET,", "\"client_id\": STRAVA_CLIENT_ID, \"redirect_uri\": APP_URL, \"response_type\": \"code\", \"approval_prompt\": \"auto\", \"scope\": \"activity:read_all\"", "base = button with col2: powered_by_strava_logo() if base.button(\"Log out\"): js", "col1, _, _, button = header base = button with", "logout_header(header=header) strava_auth = exchange_authorization_code(authorization_code) logged_in_title(strava_auth, header) st.experimental_set_query_params(session=authorization_code) return strava_auth def", "return strava_auth def header(): col1, col2, col3 = st.beta_columns(3) with", "with col2: activities = get_activities(auth=auth, page=page) if not activities: st.info(\"This", "= st else: col1, _, _, button = header base", "= button with col1: powered_by_strava_logo() base64_image = load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown( (", "_, _, button = header base = button with col1:", "return strava_auth = response.json() return strava_auth def authenticate(header=None, stop_if_unauthenticated=True): query_params", "( f\"<a href=\\\"{strava_authorization_url}\\\">\" f\" <img alt=\\\"strava login\\\" src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\" f\"</a>\"", "= {\"name\": DEFAULT_ACTIVITY_LABEL, \"start_date_local\": \"\"} activity = st.selectbox( label=\"Select an", "def download_activity(activity, strava_auth): with st.spinner(f\"Downloading activity \\\"{activity['name']}\\\"...\"): return sweat.read_strava(activity[\"id\"], strava_auth[\"access_token\"])", "def load_image_as_base64(image_path): with open(image_path, \"rb\") as f: contents = f.read()", "help=\"The Strava API returns your activities in chunks of 30.", "activities in chunks 
of 30. Increment this field to go", "STRAVA_API_BASE_URL = \"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL = \"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE = \"#fc4c02\" @st.cache(show_spinner=False)", "page\", min_value=1, help=\"The Strava API returns your activities in chunks", "st.info(\"This Strava account has no activities or you ran out", "base64_image = load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown( ( f\"<a href=\\\"{strava_authorization_url}\\\">\" f\" <img alt=\\\"strava", "if header is None: base = st else: col1, _,", "} ) try: response.raise_for_status() except httpx.HTTPStatusError: st.error(\"Something went wrong while", "activity selected\") st.stop() return activity_url = f\"https://www.strava.com/activities/{activity['id']}\" st.markdown( f\"<a href=\\\"{activity_url}\\\"", "\"start_date_local\": \"\"} activity = st.selectbox( label=\"Select an activity\", options=[default_activity] +", "+ activities, format_func=activity_label, ) if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: st.write(\"No activity", "@st.cache(show_spinner=False) def get_activities(auth, page=1): access_token = auth[\"access_token\"] response = httpx.get(", "= httpx.Request( method=\"GET\", url=STRAVA_AUTHORIZATION_URL, params={ \"client_id\": STRAVA_CLIENT_ID, \"redirect_uri\": APP_URL, \"response_type\":", "None: base = st else: _, col2, _, button =", "access_token = auth[\"access_token\"] response = httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={ \"page\": page,", "start_date.format(\"YYYY-MM-DD\") return f\"{activity['name']} - {date_string} ({human_readable_date})\" def select_strava_activity(auth): col1, col2", "field to go to the next page.\", ) with col2:", "to go to the next page.\", ) with col2: activities", "login_header(header=header) if stop_if_unauthenticated: st.stop() return else: logout_header(header=header) strava_auth = exchange_authorization_code(authorization_code)", "load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown( f'<img src=\"data:image/png;base64,{base64_image}\" width=\"100%\" alt=\"powered by strava\">', unsafe_allow_html=True, )", "if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: st.write(\"No activity selected\") st.stop() return activity_url", "strava_authorization_url = authorization_url() if header is None: base = st", "strava_auth[\"athlete\"][\"firstname\"] last_name = strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome, {first_name} {last_name}!*\") @st.cache(show_spinner=False, suppress_st_warning=True) def", "httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={ \"page\": page, }, headers={ \"Authorization\": f\"Bearer {access_token}\",", "= f\"window.location.href = '{APP_URL}'\" html = f\"<img src onerror=\\\"{js}\\\">\" div", "st.error(\"Something went wrong while authenticating with Strava. Please reload and", "{\"name\": DEFAULT_ACTIVITY_LABEL, \"start_date_local\": \"\"} activity = st.selectbox( label=\"Select an activity\",", "response.raise_for_status() except httpx.HTTPStatusError: st.error(\"Something went wrong while authenticating with Strava.", "col2, _, button = header base = button with col2:", "login\\\" src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\" f\"</a>\" ), unsafe_allow_html=True, ) def logout_header(header=None): if", "went wrong while authenticating with Strava. 
Please reload and try", "with col3: strava_button = st.empty() return col1, col2, col3, strava_button", ") with col2: activities = get_activities(auth=auth, page=page) if not activities:", "base.markdown( ( f\"<a href=\\\"{strava_authorization_url}\\\">\" f\" <img alt=\\\"strava login\\\" src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\"", "f\"window.location.href = '{APP_URL}'\" html = f\"<img src onerror=\\\"{js}\\\">\" div =", "activity_label(activity): if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: return \"\" start_date = arrow.get(activity[\"start_date_local\"])", "f\"<img src onerror=\\\"{js}\\\">\" div = Div(text=html) st.bokeh_chart(div) def logged_in_title(strava_auth, header=None):", "is None: login_header(header=header) if stop_if_unauthenticated: st.stop() return else: logout_header(header=header) strava_auth", "Please reload and try again\") st.experimental_set_query_params() st.stop() return strava_auth =", "def powered_by_strava_logo(): base64_image = load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown( f'<img src=\"data:image/png;base64,{base64_image}\" width=\"100%\" alt=\"powered", "= httpx.post( url=\"https://www.strava.com/oauth/token\", json={ \"client_id\": STRAVA_CLIENT_ID, \"client_secret\": STRAVA_CLIENT_SECRET, \"code\": authorization_code,", "API returns your activities in chunks of 30. Increment this", "st.bokeh_chart(div) def logged_in_title(strava_auth, header=None): if header is None: base =", "return col1, col2, col3, strava_button @st.cache(show_spinner=False) def get_activities(auth, page=1): access_token", "@st.cache(show_spinner=False) def load_image_as_base64(image_path): with open(image_path, \"rb\") as f: contents =", "out\"): js = f\"window.location.href = '{APP_URL}'\" html = f\"<img src", "if stop_if_unauthenticated: st.stop() return else: logout_header(header=header) strava_auth = exchange_authorization_code(authorization_code) logged_in_title(strava_auth,", "authorization_url() if header is None: base = st else: col1,", "return f\"{activity['name']} - {date_string} ({human_readable_date})\" def select_strava_activity(auth): col1, col2 =", "os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL = \"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL = \"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL = \"NO_ACTIVITY_SELECTED\"", "col2: activities = get_activities(auth=auth, page=page) if not activities: st.info(\"This Strava", "st.stop() return strava_auth = response.json() return strava_auth def authenticate(header=None, stop_if_unauthenticated=True):", "\"\" start_date = arrow.get(activity[\"start_date_local\"]) human_readable_date = start_date.humanize(granularity=[\"day\"]) date_string = start_date.format(\"YYYY-MM-DD\")", "button = header base = button with col2: powered_by_strava_logo() if", "min_value=1, help=\"The Strava API returns your activities in chunks of", "authenticating with Strava. 
Please reload and try again\") st.experimental_set_query_params() st.stop()", "= st.number_input( label=\"Activities page\", min_value=1, help=\"The Strava API returns your", "js = f\"window.location.href = '{APP_URL}'\" html = f\"<img src onerror=\\\"{js}\\\">\"", "col2, col3, strava_button @st.cache(show_spinner=False) def get_activities(auth, page=1): access_token = auth[\"access_token\"]", "width=\"100%\" alt=\"powered by strava\">', unsafe_allow_html=True, ) def authorization_url(): request =", "None: base = st else: col1, _, _, button =", "headers={ \"Authorization\": f\"Bearer {access_token}\", }, ) return response.json() def activity_label(activity):", "page.\", ) with col2: activities = get_activities(auth=auth, page=page) if not", "= header base = col first_name = strava_auth[\"athlete\"][\"firstname\"] last_name =", "else: col1, _, _, button = header base = button", "response.json() def activity_label(activity): if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: return \"\" start_date", "}, headers={ \"Authorization\": f\"Bearer {access_token}\", }, ) return response.json() def", "\"Authorization\": f\"Bearer {access_token}\", }, ) return response.json() def activity_label(activity): if", ") if activity[\"name\"] == DEFAULT_ACTIVITY_LABEL: st.write(\"No activity selected\") st.stop() return", "= strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome, {first_name} {last_name}!*\") @st.cache(show_spinner=False, suppress_st_warning=True) def exchange_authorization_code(authorization_code): response", "Strava account has no activities or you ran out of", "exchange_authorization_code(authorization_code) logged_in_title(strava_auth, header) st.experimental_set_query_params(session=authorization_code) return strava_auth def header(): col1, col2,", "col1, col2, col3 = st.beta_columns(3) with col3: strava_button = st.empty()", "pages.\") st.stop() default_activity = {\"name\": DEFAULT_ACTIVITY_LABEL, \"start_date_local\": \"\"} activity =", "strava_auth def header(): col1, col2, col3 = st.beta_columns(3) with col3:", "login_header(header=None): strava_authorization_url = authorization_url() if header is None: base =", "as f: contents = f.read() return base64.b64encode(contents).decode(\"utf-8\") def powered_by_strava_logo(): base64_image", "st else: col1, _, _, button = header base =", "STRAVA_CLIENT_ID, \"redirect_uri\": APP_URL, \"response_type\": \"code\", \"approval_prompt\": \"auto\", \"scope\": \"activity:read_all\" }", "= os.environ[\"APP_URL\"] STRAVA_CLIENT_ID = os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET = os.environ[\"STRAVA_CLIENT_SECRET\"] STRAVA_AUTHORIZATION_URL =", "selected\") st.stop() return activity_url = f\"https://www.strava.com/activities/{activity['id']}\" st.markdown( f\"<a href=\\\"{activity_url}\\\" style=\\\"color:{STRAVA_ORANGE};\\\">View", "= get_activities(auth=auth, page=page) if not activities: st.info(\"This Strava account has", "col3, strava_button @st.cache(show_spinner=False) def get_activities(auth, page=1): access_token = auth[\"access_token\"] response", "\"scope\": \"activity:read_all\" } ) return request.url def login_header(header=None): strava_authorization_url =", "response.json() return strava_auth def authenticate(header=None, stop_if_unauthenticated=True): query_params = st.experimental_get_query_params() authorization_code", "page = st.number_input( label=\"Activities page\", min_value=1, help=\"The Strava API returns", "\"\"} activity = st.selectbox( label=\"Select an activity\", options=[default_activity] + activities,", 
"f\"</a>\" ), unsafe_allow_html=True, ) def logout_header(header=None): if header is None:", "label=\"Select an activity\", options=[default_activity] + activities, format_func=activity_label, ) if activity[\"name\"]", "_, _, _ = header base = col first_name =", "= \"https://www.strava.com/oauth/authorize\" STRAVA_API_BASE_URL = \"https://www.strava.com/api/v3\" DEFAULT_ACTIVITY_LABEL = \"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE =", "authorization_code is None: authorization_code = query_params.get(\"session\", [None])[0] if authorization_code is", "base64_image = load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown( f'<img src=\"data:image/png;base64,{base64_image}\" width=\"100%\" alt=\"powered by strava\">',", "httpx import streamlit as st import sweat from bokeh.models.widgets import", "go to the next page.\", ) with col2: activities =", "\"client_id\": STRAVA_CLIENT_ID, \"client_secret\": STRAVA_CLIENT_SECRET, \"code\": authorization_code, \"grant_type\": \"authorization_code\", } )", "def select_strava_activity(auth): col1, col2 = st.beta_columns([1, 3]) with col1: page", "or you ran out of pages.\") st.stop() default_activity = {\"name\":", "wrong while authenticating with Strava. Please reload and try again\")", "activity_url = f\"https://www.strava.com/activities/{activity['id']}\" st.markdown( f\"<a href=\\\"{activity_url}\\\" style=\\\"color:{STRAVA_ORANGE};\\\">View on Strava</a>\", unsafe_allow_html=True", "load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown( ( f\"<a href=\\\"{strava_authorization_url}\\\">\" f\" <img alt=\\\"strava login\\\" src=\\\"data:image/png;base64,{base64_image}\\\"", "return activity_url = f\"https://www.strava.com/activities/{activity['id']}\" st.markdown( f\"<a href=\\\"{activity_url}\\\" style=\\\"color:{STRAVA_ORANGE};\\\">View on Strava</a>\",", "powered_by_strava_logo(): base64_image = load_image_as_base64(\"./static/api_logo_pwrdBy_strava_horiz_light.png\") st.markdown( f'<img src=\"data:image/png;base64,{base64_image}\" width=\"100%\" alt=\"powered by", "return request.url def login_header(header=None): strava_authorization_url = authorization_url() if header is", "strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome, {first_name} {last_name}!*\") @st.cache(show_spinner=False, suppress_st_warning=True) def exchange_authorization_code(authorization_code): response =", "import os import arrow import httpx import streamlit as st", ") try: response.raise_for_status() except httpx.HTTPStatusError: st.error(\"Something went wrong while authenticating", "header is None: base = st else: _, col2, _,", "{last_name}!*\") @st.cache(show_spinner=False, suppress_st_warning=True) def exchange_authorization_code(authorization_code): response = httpx.post( url=\"https://www.strava.com/oauth/token\", json={", "col first_name = strava_auth[\"athlete\"][\"firstname\"] last_name = strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome, {first_name} {last_name}!*\")", "stop_if_unauthenticated: st.stop() return else: logout_header(header=header) strava_auth = exchange_authorization_code(authorization_code) logged_in_title(strava_auth, header)", "<img alt=\\\"strava login\\\" src=\\\"data:image/png;base64,{base64_image}\\\" width=\\\"100%\\\">\" f\"</a>\" ), unsafe_allow_html=True, ) def", "Strava</a>\", unsafe_allow_html=True ) return activity @st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True) def download_activity(activity,", "query_params = 
st.experimental_get_query_params() authorization_code = query_params.get(\"code\", [None])[0] if authorization_code is", "_, col2, _, button = header base = button with", "def authenticate(header=None, stop_if_unauthenticated=True): query_params = st.experimental_get_query_params() authorization_code = query_params.get(\"code\", [None])[0]", "activities: st.info(\"This Strava account has no activities or you ran", "human_readable_date = start_date.humanize(granularity=[\"day\"]) date_string = start_date.format(\"YYYY-MM-DD\") return f\"{activity['name']} - {date_string}", "= st.experimental_get_query_params() authorization_code = query_params.get(\"code\", [None])[0] if authorization_code is None:", "onerror=\\\"{js}\\\">\" div = Div(text=html) st.bokeh_chart(div) def logged_in_title(strava_auth, header=None): if header", "= response.json() return strava_auth def authenticate(header=None, stop_if_unauthenticated=True): query_params = st.experimental_get_query_params()", "with col1: powered_by_strava_logo() base64_image = load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\") base.markdown( ( f\"<a href=\\\"{strava_authorization_url}\\\">\"", "import httpx import streamlit as st import sweat from bokeh.models.widgets", "st.write(\"No activity selected\") st.stop() return activity_url = f\"https://www.strava.com/activities/{activity['id']}\" st.markdown( f\"<a", "response = httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={ \"page\": page, }, headers={ \"Authorization\":", "httpx.HTTPStatusError: st.error(\"Something went wrong while authenticating with Strava. Please reload", "account has no activities or you ran out of pages.\")", "st.empty() return col1, col2, col3, strava_button @st.cache(show_spinner=False) def get_activities(auth, page=1):", "= exchange_authorization_code(authorization_code) logged_in_title(strava_auth, header) st.experimental_set_query_params(session=authorization_code) return strava_auth def header(): col1,", "def authorization_url(): request = httpx.Request( method=\"GET\", url=STRAVA_AUTHORIZATION_URL, params={ \"client_id\": STRAVA_CLIENT_ID,", "authenticate(header=None, stop_if_unauthenticated=True): query_params = st.experimental_get_query_params() authorization_code = query_params.get(\"code\", [None])[0] if", "strava_auth def authenticate(header=None, stop_if_unauthenticated=True): query_params = st.experimental_get_query_params() authorization_code = query_params.get(\"code\",", "page=1): access_token = auth[\"access_token\"] response = httpx.get( url=f\"{STRAVA_API_BASE_URL}/athlete/activities\", params={ \"page\":", "else: col, _, _, _ = header base = col", "DEFAULT_ACTIVITY_LABEL = \"NO_ACTIVITY_SELECTED\" STRAVA_ORANGE = \"#fc4c02\" @st.cache(show_spinner=False) def load_image_as_base64(image_path): with", "while authenticating with Strava. 
Please reload and try again\") st.experimental_set_query_params()", "header base = button with col1: powered_by_strava_logo() base64_image = load_image_as_base64(\"./static/btn_strava_connectwith_orange@2x.png\")", "[None])[0] if authorization_code is None: authorization_code = query_params.get(\"session\", [None])[0] if", "import Div APP_URL = os.environ[\"APP_URL\"] STRAVA_CLIENT_ID = os.environ[\"STRAVA_CLIENT_ID\"] STRAVA_CLIENT_SECRET =", "\"redirect_uri\": APP_URL, \"response_type\": \"code\", \"approval_prompt\": \"auto\", \"scope\": \"activity:read_all\" } )", "page, }, headers={ \"Authorization\": f\"Bearer {access_token}\", }, ) return response.json()", "default_activity = {\"name\": DEFAULT_ACTIVITY_LABEL, \"start_date_local\": \"\"} activity = st.selectbox( label=\"Select", "_, button = header base = button with col1: powered_by_strava_logo()", "is None: authorization_code = query_params.get(\"session\", [None])[0] if authorization_code is None:", "header) st.experimental_set_query_params(session=authorization_code) return strava_auth def header(): col1, col2, col3 =", "url=STRAVA_AUTHORIZATION_URL, params={ \"client_id\": STRAVA_CLIENT_ID, \"redirect_uri\": APP_URL, \"response_type\": \"code\", \"approval_prompt\": \"auto\",", "\"client_secret\": STRAVA_CLIENT_SECRET, \"code\": authorization_code, \"grant_type\": \"authorization_code\", } ) try: response.raise_for_status()", "first_name = strava_auth[\"athlete\"][\"firstname\"] last_name = strava_auth[\"athlete\"][\"lastname\"] col.markdown(f\"*Welcome, {first_name} {last_name}!*\") @st.cache(show_spinner=False,", "src=\"data:image/png;base64,{base64_image}\" width=\"100%\" alt=\"powered by strava\">', unsafe_allow_html=True, ) def authorization_url(): request", "\"response_type\": \"code\", \"approval_prompt\": \"auto\", \"scope\": \"activity:read_all\" } ) return request.url", "= f\"<img src onerror=\\\"{js}\\\">\" div = Div(text=html) st.bokeh_chart(div) def logged_in_title(strava_auth,", "{date_string} ({human_readable_date})\" def select_strava_activity(auth): col1, col2 = st.beta_columns([1, 3]) with", "if header is None: base = st else: col, _," ]
<filename>appliance/src/ufw_interface.py
#!/usr/bin/env python
# shamelessly stolen from: https://gitlab.com/dhj/easyufw
# A thin wrapper over the thin wrapper that is ufw
# Usage:
# import easyufw as ufw
# ufw.disable()        # disable firewall
# ufw.enable()         # enable firewall
# ufw.allow()          # default allow -- allow all
# ufw.allow(22)        # allow port 22, any protocol
# ufw.allow(22,'tcp')  # allow port 22, tcp protocol
# ufw.allow('22/tcp')  # allow port 22, tcp protocol
# ufw.allow(53,'udp')  # allow port 53, udp protocol
# ufw.deny()           # default deny -- deny all
# ufw.deny(22,'tcp')   # deny port 22, tcp protocol
# ufw.delete(22)       # delete rules referencing port 22
# ufw.reset()          # restore defaults
# ufw.status()         # return status string (default verbose=True)
# ufw.run("allow 22")  # directly run command as if from command line

import ufw.frontend
import ufw.common
import gettext

progName = ufw.common.programName
gettext.install(progName)#, unicode=True) # for i18n; fixes '_' not defined

ui = ufw.frontend.UFWFrontend(False) # no dryrun -- do it live
backend = ui.backend
parse_command = ufw.frontend.parse_command

def _parse(actionstr):
    # parse commands like "allow 22", "reset", "default allow"
    argv = [progName]
    argv.extend(actionstr.split(' ')) # generate bogus argv to parse
    pr = parse_command(argv)
    return pr

def run(actionstr, force=False):
    # run command with an explicit force argument
    pr = _parse(actionstr)
    rule = pr.data.get('rule','') # commands like reset don't have a rule
    iptype = pr.data.get('iptype','')
    return ui.do_action(pr.action,rule,iptype,force)

def reset(force=True):
    run('reset',force=force)

def enable():
    ui.set_enabled(True)

def disable():
    ui.set_enabled(False)

def allow(port=None, protocol=None):
    # port int; protocol str ['tcp','udp']
    pp = None
    if port is not None:
        pp = "" # port and protocol string
        pp += str(port)
        if protocol is not None:
            pp += '/' + protocol
    _allow(pp)

def _allow(pp=None):
    # pp = port and protocol string ['22','22/tcp','53/udp']
    # port without protocol includes all protocols
    if pp is None:
        run('default allow')
    else:
        run('allow ' + pp)

def deny(port=None, protocol=None):
    # port int; protocol str ['tcp','udp']
    pp = None
    if port is not None:
        pp = "" # port and protocol string
        pp += str(port)
        if protocol is not None:
            pp += '/' + protocol
    _deny(pp)

def _deny(pp=None):
    # pp = port and protocol string
    if pp is None:
        run('default deny')
    else:
        run('deny ' + pp)

def delete(port):
    # delete all rules by destination port
    while _delete(port): pass # while ports deleted re-enumerate and continue

def _delete(port):
    for i,rule in enumerate(backend.get_rules()):
        rule_port = None
        try:
            rule_port = int(rule.dport)
        except:
            rule_port = None
        if rule_port is not None and port == rule_port:
            run("delete " + str(i+1), force=True)
            return True # delete one rule; enumeration changes after delete
    return False

def status(verbose=True):
    cmd = 'status'
    if verbose:
        cmd += ' verbose'
    return run(cmd)
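A short usage sketch that mirrors the comment block at the top of the module; it assumes the file is importable as ufw_interface, that the ufw Python package is installed, and that the process runs with root privileges.

# illustrative only -- follows the usage notes in the module header above
import ufw_interface as ufw  # module name as saved in this repo (assumption)

ufw.enable()          # turn the firewall on
ufw.allow(22, 'tcp')  # open SSH over TCP
ufw.deny(23)          # block telnet on any protocol
print(ufw.status())   # verbose status string
ufw.delete(23)        # remove every rule that references port 23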
<reponame>etri-city-traffic-brain/traffic-simulator
import libsalt

def test(salt_scenario):
    libsalt.start(salt_scenario)
    libsalt.setCurrentStep(25200)
    step = libsalt.getCurrentStep()
    while step <= 36000:
        if (step % 100 == 0):
            print("Simulation Step: ", step)
            test_funcs()
        libsalt.simulationStep()
        step = libsalt.getCurrentStep()
    libsalt.close()
    print("Python: Simulation End!!!")

def test_funcs():
    standbys = libsalt.vehicle.getStandbyVehicles()
    runnings = libsalt.vehicle.getRunningVehicles()
    print("#Running Vehicles: ", len(runnings))
    #for vehicle in runnings:
    #    print("\t", vehicle.toString())
    #for vehicle in standbys:
    #    print("\t", vehicle.toString())
    # for vehicle in runnings:
    #     print("Running Vehicle)", vehicle.id, ":", libsalt.vehicle.getRoute(vehicle.id).toString())
    #     print("Running Vehicle)", vehicle.id, ":", vehicle.toString())
    #print("#Standby Vehicles: ", len(standbys))
    #for vehicle in standbys:
    #    print("Standby Vehicle)", vehicle.id, ":", libsalt.vehicle.getRouteString(vehicle.id))
    #    print("Standby Vehicle)", vehicle.id, ":", vehicle.toString())

if __name__ == "__main__":
    salt_scenario = r"/home/mclee/project/traffic-simulator/data/dj_sample_data/2020-dj_sample.json"
    test(salt_scenario)
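The stepping pattern above (start, setCurrentStep, repeated simulationStep/getCurrentStep, close) can be wrapped into a small parameterised runner. This is only a sketch: it uses the libsalt calls that appear in the script, while the begin/end/reporting arguments are assumed values for illustration.

# sketch: parameterised variant of the loop in test(); default step values are examples
def run_scenario(salt_scenario, begin_step=25200, end_step=36000, report_every=100):
    libsalt.start(salt_scenario)
    libsalt.setCurrentStep(begin_step)
    step = libsalt.getCurrentStep()
    while step <= end_step:
        if step % report_every == 0:
            print("Simulation Step: ", step)
        libsalt.simulationStep()
        step = libsalt.getCurrentStep()
    libsalt.close()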
#MenuTitle: Copy Layer to Layer
# -*- coding: utf-8 -*-
__doc__="""
Copies one master to another master in selected glyphs.
"""

import GlyphsApp
import vanilla
import math

def getComponentScaleX_scaleY_rotation( self ):
    a = self.transform[0]
    b = self.transform[1]
    c = self.transform[2]
    d = self.transform[3]

    scale_x = math.sqrt(math.pow(a,2)+math.pow(b,2))
    scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2))
    if (b<0 and c<0):
        scale_y = scale_y * -1
    rotation = math.atan2(b, a) * (180/math.pi)

    return [scale_x, scale_y, rotation]

class MasterFiller( object ):
    def __init__( self ):
        # Window 'self.w':
        windowWidth = 280
        windowHeight = 155
        windowWidthResize = 120 # user can resize width by this value
        windowHeightResize = 0 # user can resize height by this value
        self.w = vanilla.FloatingWindow(
            ( windowWidth, windowHeight ), # default window size
            "Copy layer to layer", # window title
            minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
            maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
            autosaveName = "com.mekkablue.MasterFiller.mainwindow" # stores last window position and size
        )

        self.w.text_1 = vanilla.TextBox((15, 12+2, 120, 14), "Copy paths from", sizeStyle='small')
        self.w.master_from = vanilla.PopUpButton((120, 12, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)

        self.w.text_2 = vanilla.TextBox((15, 32+2, 120, 14), "into selection of", sizeStyle='small')
        self.w.master_into = vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)

        self.w.include_components = vanilla.CheckBox((15, 52+2, -100, 20), "Include components", sizeStyle='small', callback=self.SavePreferences, value=True)
        self.w.include_anchors = vanilla.CheckBox((15, 52+20, -100, 20), "Include anchors", sizeStyle='small', callback=self.SavePreferences, value=True)
        self.w.include_metrics = vanilla.CheckBox((15, 52+38, -100, 20), "Include metrics", sizeStyle='small', callback=self.SavePreferences, value=True)
        self.w.keep_window_open = vanilla.CheckBox((15, 52+56, -100, 20), "Keep window open", sizeStyle='small', callback=self.SavePreferences, value=True)

        self.w.copybutton = vanilla.Button((-80, -30, -15, -10), "Copy", sizeStyle='small', callback=self.buttonCallback)
        self.w.setDefaultButton( self.w.copybutton )

        # Load Settings:
        if not self.LoadPreferences():
            print "Note: 'Copy Layer to Layer' could not load preferences. Will resort to defaults."

        self.w.open()

    def SavePreferences( self, sender ):
        try:
            Glyphs.defaults["com.mekkablue.MasterFiller.include_components"] = self.w.include_components.get()
            Glyphs.defaults["com.mekkablue.MasterFiller.include_anchors"] = self.w.include_anchors.get()
            Glyphs.defaults["com.mekkablue.MasterFiller.include_metrics"] = self.w.include_metrics.get()
            Glyphs.defaults["com.mekkablue.MasterFiller.keep_window_open"] = self.w.keep_window_open.get()
        except:
            return False
        return True

    def LoadPreferences( self ):
        try:
            NSUserDefaults.standardUserDefaults().registerDefaults_(
                {
                    "com.mekkablue.MasterFiller.include_components" : "1",
                    "com.mekkablue.MasterFiller.include_anchors" : "1",
                    "com.mekkablue.MasterFiller.include_metrics" : "1",
                    "com.mekkablue.MasterFiller.keep_window_open" : "1"
                }
            )
            self.w.include_components.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_components"] )
            self.w.include_anchors.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_anchors"] )
            self.w.include_metrics.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_metrics"] )
            self.w.keep_window_open.set( Glyphs.defaults["com.mekkablue.MasterFiller.keep_window_open"] )
        except:
            return False
        return True

    def GetMasterNames( self ):
        myMasterList = []
        for i in range( len( Glyphs.currentDocument.font.masters ) ):
            x = Glyphs.currentDocument.font.masters[i]
            myMasterList.append( '%i: %s' % (i, x.name) )
        return myMasterList

    def MasterChangeCallback( self, sender ):
        if self.w.master_from.get() == self.w.master_into.get():
            self.w.copybutton.enable( False )
        else:
            self.w.copybutton.enable( True )

    def copyPathsFromLayerToLayer( self, sourceLayer, targetLayer ):
        """Copies all paths from sourceLayer to targetLayer"""
        num_from = len( sourceLayer.paths )
        num_into = len( targetLayer.paths )

        if num_into != 0:
            print "- Cleaning out paths in target layer"
            for i in range( num_into )[::-1]:
                del targetLayer.paths[i]

        if num_from > 0:
            print "- Copying paths"
            for thisPath in sourceLayer.paths:
                newPath = GSPath()
                for n in thisPath.nodes:
                    newNode = GSNode()
                    newNode.type = n.type
                    newNode.connection = n.connection
                    newNode.setPosition_( (n.x, n.y) )
                    newPath.addNode_( newNode )
                newPath.closed = thisPath.closed
                targetLayer.paths.append( newPath )

    def copyComponentsFromLayerToLayer( self, sourceLayer, targetLayer ):
        """Copies all components from sourceLayer to targetLayer."""
        comp_from = len( sourceLayer.components )
        comp_into = len( targetLayer.components )

        if comp_into != 0:
            print "- Cleaning out components in target layer"
            for i in range( comp_into )[::-1]:
                del targetLayer.components[i]

        if comp_from > 0:
            print "- Copying components:"
            for thisComp in sourceLayer.components:
                compName = str( thisComp.componentName ) # str() probably not necessary anymore, but once fixed a problem
                newComp = GSComponent( compName )
                newComp.setPosition_( (thisComp.x, thisComp.y) )
                ScaleX_scaleY_rotation = getComponentScaleX_scaleY_rotation(thisComp)
                newComp.setScaleX_scaleY_rotation_(ScaleX_scaleY_rotation[0],ScaleX_scaleY_rotation[1],ScaleX_scaleY_rotation[2])
                print "-- Component: %s" % ( compName )
                targetLayer.components.append( newComp )

    def copyAnchorsFromLayerToLayer( self, sourceLayer, targetLayer ):
        """Copies all anchors from sourceLayer to targetLayer."""
        anch_from = len( sourceLayer.anchors )
        anch_into = len( targetLayer.anchors )

        if anch_into != 0:
            print "- Cleaning out anchors in target layer"
            targetLayer.setAnchors_( None ) # clear the target layer's anchors before copying

        if anch_from > 0:
            print "- Copying anchors from source layer:"
            for thisAnchor in sourceLayer.anchors:
                anchorName = thisAnchor.name
                anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y )
                newAnchor = GSAnchor( anchorName, anchorPosition )
                print "-- %s (%i, %i)" % ( anchorName, anchorPosition.x, anchorPosition.y )
                targetLayer.addAnchor_( newAnchor )

    def copyMetricsFromLayerToLayer( self, sourceLayer, targetLayer ):
        """Copies width of sourceLayer to targetLayer."""
        sourceWidth = sourceLayer.width
        if targetLayer.width != sourceWidth:
            targetLayer.width = sourceWidth
            print "- Copying width (%.1f)" % sourceWidth
        else:
            print "- Width not changed (already was %.1f)" % sourceWidth

    def buttonCallback( self, sender ):
        Glyphs.clearLog()
        Glyphs.showMacroWindow()
        print "Copy Layer to Layer Protocol:"

        Font = Glyphs.font
        Doc = Glyphs.currentDocument
        selectedGlyphs = [ x.parent for x in Font.selectedLayers ]

        index_from = self.w.master_from.get()
        index_into = self.w.master_into.get()
        compYesNo = self.w.include_components.get()
        anchYesNo = self.w.include_anchors.get()
        metrYesNo = self.w.include_metrics.get()

        for thisGlyph in selectedGlyphs:
            try:
                print "\nProcessing", thisGlyph.name
                sourcelayer = thisGlyph.layers[ index_from ]
                targetlayer = thisGlyph.layers[ index_into ]

                Font.disableUpdateInterface()

                # copy paths:
                self.copyPathsFromLayerToLayer( sourcelayer, targetlayer )

                # copy components:
                if compYesNo:
                    self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer )

                # copy anchors:
                if anchYesNo:
                    self.copyAnchorsFromLayerToLayer( sourcelayer, targetlayer )

                # copy metrics:
                if metrYesNo:
                    self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer )

                Font.enableUpdateInterface()
            except Exception, e:
                print e

        if not self.w.keep_window_open.get():
            self.w.close()

MasterFiller()
\"\"\" import GlyphsApp import vanilla import math", "thisPath.closed targetLayer.paths.append( newPath ) def copyComponentsFromLayerToLayer( self, sourceLayer, targetLayer ):", "Protocol:\" Font = Glyphs.font Doc = Glyphs.currentDocument selectedGlyphs = [", "vanilla import math def getComponentScaleX_scaleY_rotation( self ): a = self.transform[0]", "print \"- Copying anchors from source layer:\" for thisAnchor in", "\"\"\"Copies all components from sourceLayer to targetLayer.\"\"\" comp_from = len(", "# copy metrics: if metrYesNo: self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer ) Font.enableUpdateInterface()", "to Layer # -*- coding: utf-8 -*- __doc__=\"\"\" Copies one", "= vanilla.CheckBox((15, 52+2, -100, 20), \"Include components\", sizeStyle='small', callback=self.SavePreferences, value=True)", "value=True) self.w.include_metrics = vanilla.CheckBox((15, 52+38, -100, 20), \"Include metrics\", sizeStyle='small',", "self ): try: NSUserDefaults.standardUserDefaults().registerDefaults_( { \"com.mekkablue.MasterFiller.include_components\" : \"1\", \"com.mekkablue.MasterFiller.include_anchors\" :", "# copy components: if compYesNo: self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer ) #", "sourceLayer.anchors: anchorName = thisAnchor.name anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y )", "thisGlyph.layers[ index_into ] Font.disableUpdateInterface() # copy paths: self.copyPathsFromLayerToLayer( sourcelayer, targetlayer", "= vanilla.CheckBox((15, 52+56, -100, 20), \"Keep window open\", sizeStyle='small', callback=self.SavePreferences,", "False return True def GetMasterNames( self ): myMasterList = []", ") self.w.include_components.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] ) self.w.include_anchors.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] ) self.w.include_metrics.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] )", "\"1\" } ) self.w.include_components.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] ) self.w.include_anchors.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] ) self.w.include_metrics.set(", "if comp_into != 0: print \"- Cleaning out components in", "def getComponentScaleX_scaleY_rotation( self ): a = self.transform[0] b = self.transform[1]", "sizeStyle='small') self.w.master_into = vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)", "sourceLayer.width if targetLayer.width != sourceWidth: targetLayer.width = sourceWidth print \"-", "once fixed a problem newComp = GSComponent( compName ) newComp.setPosition_(", "in sourceLayer.paths: newPath = GSPath() for n in thisPath.nodes: newNode", "self.w.master_from.get() index_into = self.w.master_into.get() compYesNo = self.w.include_components.get() anchYesNo = self.w.include_anchors.get()", "all components from sourceLayer to targetLayer.\"\"\" comp_from = len( sourceLayer.components", "* -1 rotation = math.atan2(b, a) * (180/math.pi) return [scale_x,", "window title minSize = ( windowWidth, windowHeight ), # minimum", "= n.connection newNode.setPosition_( (n.x, n.y) ) newPath.addNode_( newNode ) newPath.closed", "except: return False return True def LoadPreferences( self ): try:", "# stores last window position and size ) self.w.text_1 =", "i in range( len( Glyphs.currentDocument.font.masters ) ): x = Glyphs.currentDocument.font.masters[i]", ") except: return False return True def GetMasterNames( self ):", "windowWidthResize, 
windowHeight + windowHeightResize ), # maximum size (for resizing)", "probably not necessary anymore, but once fixed a problem newComp", "targetLayer ): \"\"\"Copies all anchors from sourceLayer to targetLayer.\"\"\" anch_from", "myMasterList = [] for i in range( len( Glyphs.currentDocument.font.masters )", "self.w.copybutton.enable( True ) def copyPathsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies", "str() probably not necessary anymore, but once fixed a problem", "anchorName = thisAnchor.name anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y ) newAnchor", "import math def getComponentScaleX_scaleY_rotation( self ): a = self.transform[0] b", "minimum size (for resizing) maxSize = ( windowWidth + windowWidthResize,", "to another master in selected glyphs. \"\"\" import GlyphsApp import", "layer\", # window title minSize = ( windowWidth, windowHeight ),", "return False return True def GetMasterNames( self ): myMasterList =", "range( len( Glyphs.currentDocument.font.masters ) ): x = Glyphs.currentDocument.font.masters[i] myMasterList.append( '%i:", ": \"1\" } ) self.w.include_components.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] ) self.w.include_anchors.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] )", "Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] = self.w.keep_window_open.get() except: return False return True def LoadPreferences(", "self.w.copybutton.enable( False ) else: self.w.copybutton.enable( True ) def copyPathsFromLayerToLayer( self,", "index_from ] targetlayer = thisGlyph.layers[ index_into ] Font.disableUpdateInterface() # copy", "self.copyPathsFromLayerToLayer( sourcelayer, targetlayer ) # copy components: if compYesNo: self.copyComponentsFromLayerToLayer(", "anchorName, anchorPosition.x, anchorPosition.y ) targetLayer.addAnchor_( newAnchor ) def copyMetricsFromLayerToLayer( self,", "master in selected glyphs. 
\"\"\" import GlyphsApp import vanilla import", "thisAnchor.name anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y ) newAnchor = GSAnchor(", "newPath.closed = thisPath.closed targetLayer.paths.append( newPath ) def copyComponentsFromLayerToLayer( self, sourceLayer,", "windowHeight ), # minimum size (for resizing) maxSize = (", "if anch_into != 0: print \"- Cleaning out anchors in", "self.w.include_components.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] = self.w.include_anchors.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] = self.w.include_metrics.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] = self.w.keep_window_open.get()", "%s' % (i, x.name) ) return myMasterList def MasterChangeCallback( self,", "self, sourceLayer, targetLayer ): \"\"\"Copies all anchors from sourceLayer to", "self.w.include_anchors.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] ) self.w.include_metrics.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] ) self.w.keep_window_open.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] ) except:", "user can resize width by this value windowHeightResize = 0", "return True def LoadPreferences( self ): try: NSUserDefaults.standardUserDefaults().registerDefaults_( { \"com.mekkablue.MasterFiller.include_components\"", "= self.w.include_anchors.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] = self.w.include_metrics.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] = self.w.keep_window_open.get() except: return", "try: NSUserDefaults.standardUserDefaults().registerDefaults_( { \"com.mekkablue.MasterFiller.include_components\" : \"1\", \"com.mekkablue.MasterFiller.include_anchors\" : \"1\", \"com.mekkablue.MasterFiller.include_metrics\"", "thisPath in sourceLayer.paths: newPath = GSPath() for n in thisPath.nodes:", "52+38, -100, 20), \"Include metrics\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.keep_window_open =", "len( Glyphs.currentDocument.font.masters ) ): x = Glyphs.currentDocument.font.masters[i] myMasterList.append( '%i: %s'", "callback=self.SavePreferences, value=True) self.w.copybutton = vanilla.Button((-80, -30, -15, -10), \"Copy\", sizeStyle='small',", "\"Copy\", sizeStyle='small', callback=self.buttonCallback) self.w.setDefaultButton( self.w.copybutton ) # Load Settings: if", "Width not changed (already was %.1f)\" % sourceWidth def buttonCallback(", "self.w.include_anchors = vanilla.CheckBox((15, 52+20, -100, 20), \"Include anchors\", sizeStyle='small', callback=self.SavePreferences,", "compYesNo = self.w.include_components.get() anchYesNo = self.w.include_anchors.get() metrYesNo = self.w.include_metrics.get() for", ") return myMasterList def MasterChangeCallback( self, sender ): if self.w.master_from.get()", "\"-- Component: %s\" % ( compName ) targetLayer.components.append( newComp )", "-100, 20), \"Keep window open\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.copybutton =", "rotation] class MasterFiller( object ): def __init__( self ): #", "sourceLayer.paths: newPath = GSPath() for n in thisPath.nodes: newNode =", "sizeStyle='small', callback=self.MasterChangeCallback) self.w.include_components = vanilla.CheckBox((15, 52+2, -100, 20), \"Include components\",", "to targetLayer.\"\"\" comp_from = len( sourceLayer.components ) comp_into = len(", "print \"- Cleaning out paths in target layer\" for i", "self.w.master_from = 
vanilla.PopUpButton((120, 12, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.text_2", "self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.text_2 = vanilla.TextBox((15, 32+2, 120, 14), \"into", ") # Load Settings: if not self.LoadPreferences(): print \"Note: 'Copy", "in thisPath.nodes: newNode = GSNode() newNode.type = n.type newNode.connection =", "GlyphsApp import vanilla import math def getComponentScaleX_scaleY_rotation( self ): a", "if not self.LoadPreferences(): print \"Note: 'Copy Layer to Layer' could", "scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2)) if (b<0 and c<0): scale_y = scale_y", "getComponentScaleX_scaleY_rotation(thisComp) newComp.setScaleX_scaleY_rotation_(ScaleX_scaleY_rotation[0],ScaleX_scaleY_rotation[1],ScaleX_scaleY_rotation[2]) print \"-- Component: %s\" % ( compName )", "Layer # -*- coding: utf-8 -*- __doc__=\"\"\" Copies one master", "out components in target layer\" for i in range( comp_into", "0: print \"- Copying paths\" for thisPath in sourceLayer.paths: newPath", "n.connection newNode.setPosition_( (n.x, n.y) ) newPath.addNode_( newNode ) newPath.closed =", "52+20, -100, 20), \"Include anchors\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_metrics =", "+ windowHeightResize ), # maximum size (for resizing) autosaveName =", "not load preferences. Will resort to defaults.\" self.w.open() self.w.makeKey() self.w.master_into.set(1)", "= scale_y * -1 rotation = math.atan2(b, a) * (180/math.pi)", "Copying anchors from source layer:\" for thisAnchor in sourceLayer.anchors: anchorName", "): a = self.transform[0] b = self.transform[1] c = self.transform[2]", "sourceWidth def buttonCallback( self, sender ): Glyphs.clearLog() Glyphs.showMacroWindow() print \"Copy", "callback=self.buttonCallback) self.w.setDefaultButton( self.w.copybutton ) # Load Settings: if not self.LoadPreferences():", "print \"- Copying components:\" for thisComp in sourceLayer.components: compName =", "= thisGlyph.layers[ index_into ] Font.disableUpdateInterface() # copy paths: self.copyPathsFromLayerToLayer( sourcelayer,", "Font.enableUpdateInterface() except Exception, e: print e if not self.w.keep_window_open.get(): self.w.close()", "= vanilla.TextBox((15, 32+2, 120, 14), \"into selection of\", sizeStyle='small') self.w.master_into", "\"- Cleaning out paths in target layer\" for i in", "): \"\"\"Copies all components from sourceLayer to targetLayer.\"\"\" comp_from =", "glyphs. 
\"\"\" import GlyphsApp import vanilla import math def getComponentScaleX_scaleY_rotation(", "= [] for i in range( len( Glyphs.currentDocument.font.masters ) ):", "copy paths: self.copyPathsFromLayerToLayer( sourcelayer, targetlayer ) # copy components: if", "c<0): scale_y = scale_y * -1 rotation = math.atan2(b, a)", "width of sourceLayer to targetLayer.\"\"\" sourceWidth = sourceLayer.width if targetLayer.width", "= vanilla.CheckBox((15, 52+20, -100, 20), \"Include anchors\", sizeStyle='small', callback=self.SavePreferences, value=True)", "scale_y * -1 rotation = math.atan2(b, a) * (180/math.pi) return", "= len( sourceLayer.anchors ) anch_into = len( targetLayer.anchors ) if", ")[::-1]: del targetLayer.paths[i] if num_from > 0: print \"- Copying", "self.w.include_metrics.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] = self.w.keep_window_open.get() except: return False return True def", "120, 14), \"Copy paths from\", sizeStyle='small') self.w.master_from = vanilla.PopUpButton((120, 12,", "size ) self.w.text_1 = vanilla.TextBox((15, 12+2, 120, 14), \"Copy paths", "-15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.text_2 = vanilla.TextBox((15, 32+2, 120,", "def LoadPreferences( self ): try: NSUserDefaults.standardUserDefaults().registerDefaults_( { \"com.mekkablue.MasterFiller.include_components\" : \"1\",", "self.transform[2] d = self.transform[3] scale_x = math.sqrt(math.pow(a,2)+math.pow(b,2)) scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2))", "for i in range( num_into )[::-1]: del targetLayer.paths[i] if num_from", "anch_into != 0: print \"- Cleaning out anchors in target", "all anchors from sourceLayer to targetLayer.\"\"\" anch_from = len( sourceLayer.anchors", "by this value self.w = vanilla.FloatingWindow( ( windowWidth, windowHeight ),", "> 0: print \"- Copying components:\" for thisComp in sourceLayer.components:", "was %.1f)\" % sourceWidth def buttonCallback( self, sender ): Glyphs.clearLog()", "% (i, x.name) ) return myMasterList def MasterChangeCallback( self, sender", "targetLayer.components[i] if comp_from > 0: print \"- Copying components:\" for", ") def copyPathsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies all paths", "callback=self.MasterChangeCallback) self.w.text_2 = vanilla.TextBox((15, 32+2, 120, 14), \"into selection of\",", "targetlayer ) Font.enableUpdateInterface() except Exception, e: print e if not", "20), \"Include metrics\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.keep_window_open = vanilla.CheckBox((15, 52+56,", "newPath.addNode_( newNode ) newPath.closed = thisPath.closed targetLayer.paths.append( newPath ) def", "self.w.master_into.set(1) def SavePreferences( self, sender ): try: Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] = self.w.include_components.get()", "-30, -15, -10), \"Copy\", sizeStyle='small', callback=self.buttonCallback) self.w.setDefaultButton( self.w.copybutton ) #", "vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.include_components = vanilla.CheckBox((15,", "= self.transform[0] b = self.transform[1] c = self.transform[2] d =", "if num_from > 0: print \"- Copying paths\" for thisPath", "anchors in target layer\" sourceLayer.setAnchors_( None ) if anch_from >", "'%i: %s' % (i, x.name) ) return myMasterList def MasterChangeCallback(", "size (for resizing) autosaveName = \"com.mekkablue.MasterFiller.mainwindow\" # stores last 
window", "copyPathsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies all paths from sourceLayer", "= self.w.include_components.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] = self.w.include_anchors.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] = self.w.include_metrics.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] =", ") anch_into = len( targetLayer.anchors ) if anch_into != 0:", "print \"- Width not changed (already was %.1f)\" % sourceWidth", "False ) else: self.w.copybutton.enable( True ) def copyPathsFromLayerToLayer( self, sourceLayer,", "for i in range( comp_into )[::-1]: del targetLayer.components[i] if comp_from", "targetLayer.\"\"\" comp_from = len( sourceLayer.components ) comp_into = len( targetLayer.components", "sourceLayer.setAnchors_( None ) if anch_from > 0: print \"- Copying", "# minimum size (for resizing) maxSize = ( windowWidth +", "if targetLayer.width != sourceWidth: targetLayer.width = sourceWidth print \"- Copying", "print \"- Copying paths\" for thisPath in sourceLayer.paths: newPath =", "anchorPosition.y ) targetLayer.addAnchor_( newAnchor ) def copyMetricsFromLayerToLayer( self, sourceLayer, targetLayer", "value=True) self.w.keep_window_open = vanilla.CheckBox((15, 52+56, -100, 20), \"Keep window open\",", "self.w.setDefaultButton( self.w.copybutton ) # Load Settings: if not self.LoadPreferences(): print", "): \"\"\"Copies width of sourceLayer to targetLayer.\"\"\" sourceWidth = sourceLayer.width", "-10), \"Copy\", sizeStyle='small', callback=self.buttonCallback) self.w.setDefaultButton( self.w.copybutton ) # Load Settings:", "self.transform[3] scale_x = math.sqrt(math.pow(a,2)+math.pow(b,2)) scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2)) if (b<0 and", "target layer\" sourceLayer.setAnchors_( None ) if anch_from > 0: print", "-15, -10), \"Copy\", sizeStyle='small', callback=self.buttonCallback) self.w.setDefaultButton( self.w.copybutton ) # Load", "\"Copy paths from\", sizeStyle='small') self.w.master_from = vanilla.PopUpButton((120, 12, -15, 17),", "num_into != 0: print \"- Cleaning out paths in target", "( windowWidth, windowHeight ), # default window size \"Copy layer", "one master to another master in selected glyphs. 
\"\"\" import", "return [scale_x, scale_y, rotation] class MasterFiller( object ): def __init__(", "52+2, -100, 20), \"Include components\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_anchors =", "targetLayer ): \"\"\"Copies all components from sourceLayer to targetLayer.\"\"\" comp_from", "thisAnchor in sourceLayer.anchors: anchorName = thisAnchor.name anchorPosition = NSPoint( thisAnchor.x,", "> 0: print \"- Copying anchors from source layer:\" for", "= 155 windowWidthResize = 120 # user can resize width", "targetLayer.components.append( newComp ) def copyAnchorsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies", "can resize width by this value windowHeightResize = 0 #", ") def copyComponentsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies all components", "targetLayer.anchors ) if anch_into != 0: print \"- Cleaning out", "sender ): Glyphs.clearLog() Glyphs.showMacroWindow() print \"Copy Layer to Layer Protocol:\"", "size \"Copy layer to layer\", # window title minSize =", "= 120 # user can resize width by this value", "can resize height by this value self.w = vanilla.FloatingWindow( (", "sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_metrics = vanilla.CheckBox((15, 52+38, -100, 20), \"Include", "paths from\", sizeStyle='small') self.w.master_from = vanilla.PopUpButton((120, 12, -15, 17), self.GetMasterNames(),", "% sourceWidth else: print \"- Width not changed (already was", "len( targetLayer.anchors ) if anch_into != 0: print \"- Cleaning", ") targetLayer.addAnchor_( newAnchor ) def copyMetricsFromLayerToLayer( self, sourceLayer, targetLayer ):", "targetLayer.addAnchor_( newAnchor ) def copyMetricsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies", "out anchors in target layer\" sourceLayer.setAnchors_( None ) if anch_from", "resizing) autosaveName = \"com.mekkablue.MasterFiller.mainwindow\" # stores last window position and", "thisGlyph in selectedGlyphs: try: print \"\\nProcessing\", thisGlyph.name sourcelayer = thisGlyph.layers[", "sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_anchors = vanilla.CheckBox((15, 52+20, -100, 20), \"Include", "to defaults.\" self.w.open() self.w.makeKey() self.w.master_into.set(1) def SavePreferences( self, sender ):", "(%i, %i)\" % ( anchorName, anchorPosition.x, anchorPosition.y ) targetLayer.addAnchor_( newAnchor", "if anch_from > 0: print \"- Copying anchors from source", "targetlayer ) # copy components: if compYesNo: self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer", "), # maximum size (for resizing) autosaveName = \"com.mekkablue.MasterFiller.mainwindow\" #", "self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.include_components = vanilla.CheckBox((15, 52+2, -100, 20), \"Include", "sender ): if self.w.master_from.get() == self.w.master_into.get(): self.w.copybutton.enable( False ) else:", ": \"1\", \"com.mekkablue.MasterFiller.include_anchors\" : \"1\", \"com.mekkablue.MasterFiller.include_metrics\" : \"1\", \"com.mekkablue.MasterFiller.keep_window_open\" :", "): # Window 'self.w': windowWidth = 280 windowHeight = 155", "def MasterChangeCallback( self, sender ): if self.w.master_from.get() == self.w.master_into.get(): self.w.copybutton.enable(", "len( sourceLayer.paths ) num_into = len( targetLayer.paths ) if num_into", "thisComp.y) ) ScaleX_scaleY_rotation = getComponentScaleX_scaleY_rotation(thisComp) 
newComp.setScaleX_scaleY_rotation_(ScaleX_scaleY_rotation[0],ScaleX_scaleY_rotation[1],ScaleX_scaleY_rotation[2]) print \"-- Component: %s\"", "math.sqrt(math.pow(a,2)+math.pow(b,2)) scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2)) if (b<0 and c<0): scale_y =", "= self.w.keep_window_open.get() except: return False return True def LoadPreferences( self", "preferences. Will resort to defaults.\" self.w.open() self.w.makeKey() self.w.master_into.set(1) def SavePreferences(", "newNode = GSNode() newNode.type = n.type newNode.connection = n.connection newNode.setPosition_(", "32, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.include_components = vanilla.CheckBox((15, 52+2,", "anch_into = len( targetLayer.anchors ) if anch_into != 0: print", "(%.1f)\" % sourceWidth else: print \"- Width not changed (already", "of\", sizeStyle='small') self.w.master_into = vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(), sizeStyle='small',", "paths: self.copyPathsFromLayerToLayer( sourcelayer, targetlayer ) # copy components: if compYesNo:", "self.w.include_components.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] ) self.w.include_anchors.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] ) self.w.include_metrics.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] ) self.w.keep_window_open.set(", "copy anchors: if anchYesNo: self.copyAnchorsFromLayerToLayer( sourcelayer, targetlayer ) # copy", "not necessary anymore, but once fixed a problem newComp =", "# user can resize height by this value self.w =", "Layer to Layer' could not load preferences. Will resort to", "x.name) ) return myMasterList def MasterChangeCallback( self, sender ): if", "] targetlayer = thisGlyph.layers[ index_into ] Font.disableUpdateInterface() # copy paths:", "anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y ) newAnchor = GSAnchor( anchorName,", "b = self.transform[1] c = self.transform[2] d = self.transform[3] scale_x", "vanilla.TextBox((15, 32+2, 120, 14), \"into selection of\", sizeStyle='small') self.w.master_into =", "size (for resizing) maxSize = ( windowWidth + windowWidthResize, windowHeight", ") newPath.closed = thisPath.closed targetLayer.paths.append( newPath ) def copyComponentsFromLayerToLayer( self,", "len( targetLayer.components ) if comp_into != 0: print \"- Cleaning", "copyAnchorsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies all anchors from sourceLayer", "GSPath() for n in thisPath.nodes: newNode = GSNode() newNode.type =", "targetLayer ): \"\"\"Copies width of sourceLayer to targetLayer.\"\"\" sourceWidth =", "sourceWidth: targetLayer.width = sourceWidth print \"- Copying width (%.1f)\" %", "thisComp.componentName ) # str() probably not necessary anymore, but once", ") num_into = len( targetLayer.paths ) if num_into != 0:", "except: return False return True def GetMasterNames( self ): myMasterList", "self.w.keep_window_open.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] ) except: return False return True def GetMasterNames(", "selection of\", sizeStyle='small') self.w.master_into = vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(),", "from sourceLayer to targetLayer\"\"\" num_from = len( sourceLayer.paths ) num_into", "open\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.copybutton = vanilla.Button((-80, -30, -15, -10),", "Copying components:\" for thisComp in sourceLayer.components: compName = str( 
thisComp.componentName", "rotation = math.atan2(b, a) * (180/math.pi) return [scale_x, scale_y, rotation]", "a problem newComp = GSComponent( compName ) newComp.setPosition_( (thisComp.x, thisComp.y)", "Layer to Layer Protocol:\" Font = Glyphs.font Doc = Glyphs.currentDocument", "Window 'self.w': windowWidth = 280 windowHeight = 155 windowWidthResize =", "to Layer' could not load preferences. Will resort to defaults.\"", "vanilla.CheckBox((15, 52+38, -100, 20), \"Include metrics\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.keep_window_open", "def SavePreferences( self, sender ): try: Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] = self.w.include_components.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"]", "%s (%i, %i)\" % ( anchorName, anchorPosition.x, anchorPosition.y ) targetLayer.addAnchor_(", "= sourceWidth print \"- Copying width (%.1f)\" % sourceWidth else:", "Copying width (%.1f)\" % sourceWidth else: print \"- Width not", "): if self.w.master_from.get() == self.w.master_into.get(): self.w.copybutton.enable( False ) else: self.w.copybutton.enable(", "Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] = self.w.include_metrics.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] = self.w.keep_window_open.get() except: return False return", "\"-- %s (%i, %i)\" % ( anchorName, anchorPosition.x, anchorPosition.y )", "of sourceLayer to targetLayer.\"\"\" sourceWidth = sourceLayer.width if targetLayer.width !=", "self.w.makeKey() self.w.master_into.set(1) def SavePreferences( self, sender ): try: Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] =", "= len( targetLayer.components ) if comp_into != 0: print \"-", ") if comp_into != 0: print \"- Cleaning out components", "math.sqrt(math.pow(c,2)+math.pow(d,2)) if (b<0 and c<0): scale_y = scale_y * -1", "targetlayer ) # copy anchors: if anchYesNo: self.copyAnchorsFromLayerToLayer( sourcelayer, targetlayer", "import vanilla import math def getComponentScaleX_scaleY_rotation( self ): a =", "compName ) targetLayer.components.append( newComp ) def copyAnchorsFromLayerToLayer( self, sourceLayer, targetLayer", "Glyphs.showMacroWindow() print \"Copy Layer to Layer Protocol:\" Font = Glyphs.font", "if self.w.master_from.get() == self.w.master_into.get(): self.w.copybutton.enable( False ) else: self.w.copybutton.enable( True", "if metrYesNo: self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer ) Font.enableUpdateInterface() except Exception, e:", "Copy Layer to Layer # -*- coding: utf-8 -*- __doc__=\"\"\"", "in target layer\" sourceLayer.setAnchors_( None ) if anch_from > 0:", "index_from = self.w.master_from.get() index_into = self.w.master_into.get() compYesNo = self.w.include_components.get() anchYesNo", "if compYesNo: self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer ) # copy anchors: if", "\"1\", \"com.mekkablue.MasterFiller.include_anchors\" : \"1\", \"com.mekkablue.MasterFiller.include_metrics\" : \"1\", \"com.mekkablue.MasterFiller.keep_window_open\" : \"1\"", "components: if compYesNo: self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer ) # copy anchors:", "c = self.transform[2] d = self.transform[3] scale_x = math.sqrt(math.pow(a,2)+math.pow(b,2)) scale_y", ") if anch_from > 0: print \"- Copying anchors from", "(thisComp.x, thisComp.y) ) ScaleX_scaleY_rotation = getComponentScaleX_scaleY_rotation(thisComp) 
newComp.setScaleX_scaleY_rotation_(ScaleX_scaleY_rotation[0],ScaleX_scaleY_rotation[1],ScaleX_scaleY_rotation[2]) print \"-- Component:", "120 # user can resize width by this value windowHeightResize", "= thisAnchor.name anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y ) newAnchor =", "sourceLayer, targetLayer ): \"\"\"Copies width of sourceLayer to targetLayer.\"\"\" sourceWidth", "x = Glyphs.currentDocument.font.masters[i] myMasterList.append( '%i: %s' % (i, x.name) )", "= len( sourceLayer.paths ) num_into = len( targetLayer.paths ) if", "% ( anchorName, anchorPosition.x, anchorPosition.y ) targetLayer.addAnchor_( newAnchor ) def", "sourceWidth else: print \"- Width not changed (already was %.1f)\"", "= Glyphs.currentDocument selectedGlyphs = [ x.parent for x in Font.selectedLayers", "\"Include anchors\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_metrics = vanilla.CheckBox((15, 52+38, -100,", "comp_into )[::-1]: del targetLayer.components[i] if comp_from > 0: print \"-", "{ \"com.mekkablue.MasterFiller.include_components\" : \"1\", \"com.mekkablue.MasterFiller.include_anchors\" : \"1\", \"com.mekkablue.MasterFiller.include_metrics\" : \"1\",", "%s\" % ( compName ) targetLayer.components.append( newComp ) def copyAnchorsFromLayerToLayer(", "Font.selectedLayers ] index_from = self.w.master_from.get() index_into = self.w.master_into.get() compYesNo =", "layer\" for i in range( comp_into )[::-1]: del targetLayer.components[i] if", "for thisComp in sourceLayer.components: compName = str( thisComp.componentName ) #", "def __init__( self ): # Window 'self.w': windowWidth = 280", "self.w.copybutton = vanilla.Button((-80, -30, -15, -10), \"Copy\", sizeStyle='small', callback=self.buttonCallback) self.w.setDefaultButton(", "\"Keep window open\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.copybutton = vanilla.Button((-80, -30,", "= self.w.include_metrics.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] = self.w.keep_window_open.get() except: return False return True", "targetLayer.\"\"\" anch_from = len( sourceLayer.anchors ) anch_into = len( targetLayer.anchors", ") # copy metrics: if metrYesNo: self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer )", "\"Copy Layer to Layer Protocol:\" Font = Glyphs.font Doc =", "17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.include_components = vanilla.CheckBox((15, 52+2, -100, 20),", "components\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_anchors = vanilla.CheckBox((15, 52+20, -100, 20),", "in target layer\" for i in range( comp_into )[::-1]: del", "= NSPoint( thisAnchor.x, thisAnchor.y ) newAnchor = GSAnchor( anchorName, anchorPosition", "\"Copy layer to layer\", # window title minSize = (", "windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size", "self.w.text_1 = vanilla.TextBox((15, 12+2, 120, 14), \"Copy paths from\", sizeStyle='small')", "newAnchor = GSAnchor( anchorName, anchorPosition ) print \"-- %s (%i,", "to targetLayer.\"\"\" sourceWidth = sourceLayer.width if targetLayer.width != sourceWidth: targetLayer.width", "self.w.keep_window_open.get() except: return False return True def LoadPreferences( self ):", "sourceLayer.paths ) num_into = len( targetLayer.paths ) if num_into !=", "sourceLayer.components: compName = str( thisComp.componentName ) # str() probably not", "metrYesNo = self.w.include_metrics.get() for thisGlyph in selectedGlyphs: try: print 
\"\\nProcessing\",", "thisGlyph.layers[ index_from ] targetlayer = thisGlyph.layers[ index_into ] Font.disableUpdateInterface() #", "120, 14), \"into selection of\", sizeStyle='small') self.w.master_into = vanilla.PopUpButton((120, 32,", "== self.w.master_into.get(): self.w.copybutton.enable( False ) else: self.w.copybutton.enable( True ) def", "[ x.parent for x in Font.selectedLayers ] index_from = self.w.master_from.get()", "__init__( self ): # Window 'self.w': windowWidth = 280 windowHeight", "targetLayer.width != sourceWidth: targetLayer.width = sourceWidth print \"- Copying width", "0 # user can resize height by this value self.w", "print \"Note: 'Copy Layer to Layer' could not load preferences.", "= vanilla.PopUpButton((120, 12, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.text_2 =", "sourcelayer, targetlayer ) # copy anchors: if anchYesNo: self.copyAnchorsFromLayerToLayer( sourcelayer,", "defaults.\" self.w.open() self.w.makeKey() self.w.master_into.set(1) def SavePreferences( self, sender ): try:", "NSPoint( thisAnchor.x, thisAnchor.y ) newAnchor = GSAnchor( anchorName, anchorPosition )", "for n in thisPath.nodes: newNode = GSNode() newNode.type = n.type", "def buttonCallback( self, sender ): Glyphs.clearLog() Glyphs.showMacroWindow() print \"Copy Layer", "layer to layer\", # window title minSize = ( windowWidth,", "sourcelayer, targetlayer ) # copy components: if compYesNo: self.copyComponentsFromLayerToLayer( sourcelayer,", "maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ),", "newNode.setPosition_( (n.x, n.y) ) newPath.addNode_( newNode ) newPath.closed = thisPath.closed", "window size \"Copy layer to layer\", # window title minSize", "MasterFiller( object ): def __init__( self ): # Window 'self.w':", "try: Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] = self.w.include_components.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] = self.w.include_anchors.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] = self.w.include_metrics.get()", "self, sourceLayer, targetLayer ): \"\"\"Copies all components from sourceLayer to", "= math.sqrt(math.pow(a,2)+math.pow(b,2)) scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2)) if (b<0 and c<0): scale_y", "target layer\" for i in range( num_into )[::-1]: del targetLayer.paths[i]", "necessary anymore, but once fixed a problem newComp = GSComponent(", "( anchorName, anchorPosition.x, anchorPosition.y ) targetLayer.addAnchor_( newAnchor ) def copyMetricsFromLayerToLayer(", "len( targetLayer.paths ) if num_into != 0: print \"- Cleaning", "another master in selected glyphs. \"\"\" import GlyphsApp import vanilla", "self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer ) Font.enableUpdateInterface() except Exception, e: print e", "newComp.setPosition_( (thisComp.x, thisComp.y) ) ScaleX_scaleY_rotation = getComponentScaleX_scaleY_rotation(thisComp) newComp.setScaleX_scaleY_rotation_(ScaleX_scaleY_rotation[0],ScaleX_scaleY_rotation[1],ScaleX_scaleY_rotation[2]) print \"--", "anchorPosition ) print \"-- %s (%i, %i)\" % ( anchorName,", "load preferences. 
Will resort to defaults.\" self.w.open() self.w.makeKey() self.w.master_into.set(1) def", "\"- Copying anchors from source layer:\" for thisAnchor in sourceLayer.anchors:", "in range( num_into )[::-1]: del targetLayer.paths[i] if num_from > 0:", "and c<0): scale_y = scale_y * -1 rotation = math.atan2(b,", "\"com.mekkablue.MasterFiller.include_anchors\" : \"1\", \"com.mekkablue.MasterFiller.include_metrics\" : \"1\", \"com.mekkablue.MasterFiller.keep_window_open\" : \"1\" }", "buttonCallback( self, sender ): Glyphs.clearLog() Glyphs.showMacroWindow() print \"Copy Layer to", "): def __init__( self ): # Window 'self.w': windowWidth =", "= self.w.include_components.get() anchYesNo = self.w.include_anchors.get() metrYesNo = self.w.include_metrics.get() for thisGlyph", "window open\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.copybutton = vanilla.Button((-80, -30, -15,", "!= 0: print \"- Cleaning out paths in target layer\"", "layer\" for i in range( num_into )[::-1]: del targetLayer.paths[i] if", "return False return True def LoadPreferences( self ): try: NSUserDefaults.standardUserDefaults().registerDefaults_(", "False return True def LoadPreferences( self ): try: NSUserDefaults.standardUserDefaults().registerDefaults_( {", "12+2, 120, 14), \"Copy paths from\", sizeStyle='small') self.w.master_from = vanilla.PopUpButton((120,", "range( num_into )[::-1]: del targetLayer.paths[i] if num_from > 0: print", "n in thisPath.nodes: newNode = GSNode() newNode.type = n.type newNode.connection", "num_into = len( targetLayer.paths ) if num_into != 0: print", "title minSize = ( windowWidth, windowHeight ), # minimum size", "self, sender ): if self.w.master_from.get() == self.w.master_into.get(): self.w.copybutton.enable( False )", "\"Note: 'Copy Layer to Layer' could not load preferences. 
Will", "Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] ) self.w.include_metrics.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] ) self.w.keep_window_open.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] ) except: return", ") if num_into != 0: print \"- Cleaning out paths", "\"\\nProcessing\", thisGlyph.name sourcelayer = thisGlyph.layers[ index_from ] targetlayer = thisGlyph.layers[", "self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer ) # copy anchors: if anchYesNo: self.copyAnchorsFromLayerToLayer(", "\"- Copying width (%.1f)\" % sourceWidth else: print \"- Width", "): myMasterList = [] for i in range( len( Glyphs.currentDocument.font.masters", "anchorPosition.x, anchorPosition.y ) targetLayer.addAnchor_( newAnchor ) def copyMetricsFromLayerToLayer( self, sourceLayer,", "20), \"Include components\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_anchors = vanilla.CheckBox((15, 52+20,", "= thisGlyph.layers[ index_from ] targetlayer = thisGlyph.layers[ index_into ] Font.disableUpdateInterface()", "num_into )[::-1]: del targetLayer.paths[i] if num_from > 0: print \"-", "sourceLayer to targetLayer.\"\"\" sourceWidth = sourceLayer.width if targetLayer.width != sourceWidth:", "), # default window size \"Copy layer to layer\", #", "= Glyphs.currentDocument.font.masters[i] myMasterList.append( '%i: %s' % (i, x.name) ) return", "from\", sizeStyle='small') self.w.master_from = vanilla.PopUpButton((120, 12, -15, 17), self.GetMasterNames(), sizeStyle='small',", "windowHeight + windowHeightResize ), # maximum size (for resizing) autosaveName", "stores last window position and size ) self.w.text_1 = vanilla.TextBox((15,", "# Window 'self.w': windowWidth = 280 windowHeight = 155 windowWidthResize", "thisComp in sourceLayer.components: compName = str( thisComp.componentName ) # str()", "default window size \"Copy layer to layer\", # window title", ")[::-1]: del targetLayer.components[i] if comp_from > 0: print \"- Copying", "master to another master in selected glyphs. 
\"\"\" import GlyphsApp", "not self.LoadPreferences(): print \"Note: 'Copy Layer to Layer' could not", "if comp_from > 0: print \"- Copying components:\" for thisComp", "): \"\"\"Copies all anchors from sourceLayer to targetLayer.\"\"\" anch_from =", "sourceLayer, targetLayer ): \"\"\"Copies all anchors from sourceLayer to targetLayer.\"\"\"", "if (b<0 and c<0): scale_y = scale_y * -1 rotation", "0: print \"- Cleaning out anchors in target layer\" sourceLayer.setAnchors_(", "out paths in target layer\" for i in range( num_into", "% ( compName ) targetLayer.components.append( newComp ) def copyAnchorsFromLayerToLayer( self,", "sourcelayer, targetlayer ) # copy metrics: if metrYesNo: self.copyMetricsFromLayerToLayer( sourcelayer,", "Settings: if not self.LoadPreferences(): print \"Note: 'Copy Layer to Layer'", "] index_from = self.w.master_from.get() index_into = self.w.master_into.get() compYesNo = self.w.include_components.get()", "Layer Protocol:\" Font = Glyphs.font Doc = Glyphs.currentDocument selectedGlyphs =", "(already was %.1f)\" % sourceWidth def buttonCallback( self, sender ):", "# -*- coding: utf-8 -*- __doc__=\"\"\" Copies one master to", "True ) def copyPathsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies all", "to targetLayer\"\"\" num_from = len( sourceLayer.paths ) num_into = len(", "resort to defaults.\" self.w.open() self.w.makeKey() self.w.master_into.set(1) def SavePreferences( self, sender", "windowWidth, windowHeight ), # minimum size (for resizing) maxSize =", "a) * (180/math.pi) return [scale_x, scale_y, rotation] class MasterFiller( object", "anchYesNo = self.w.include_anchors.get() metrYesNo = self.w.include_metrics.get() for thisGlyph in selectedGlyphs:", "to Layer Protocol:\" Font = Glyphs.font Doc = Glyphs.currentDocument selectedGlyphs", "= GSAnchor( anchorName, anchorPosition ) print \"-- %s (%i, %i)\"", "math def getComponentScaleX_scaleY_rotation( self ): a = self.transform[0] b =", "%.1f)\" % sourceWidth def buttonCallback( self, sender ): Glyphs.clearLog() Glyphs.showMacroWindow()", "resize width by this value windowHeightResize = 0 # user", "position and size ) self.w.text_1 = vanilla.TextBox((15, 12+2, 120, 14),", "True def LoadPreferences( self ): try: NSUserDefaults.standardUserDefaults().registerDefaults_( { \"com.mekkablue.MasterFiller.include_components\" :", "i in range( comp_into )[::-1]: del targetLayer.components[i] if comp_from >", "> 0: print \"- Copying paths\" for thisPath in sourceLayer.paths:", "\"com.mekkablue.MasterFiller.keep_window_open\" : \"1\" } ) self.w.include_components.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] ) self.w.include_anchors.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"]", "value self.w = vanilla.FloatingWindow( ( windowWidth, windowHeight ), # default", ": \"1\", \"com.mekkablue.MasterFiller.keep_window_open\" : \"1\" } ) self.w.include_components.set( Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] )", "None ) if anch_from > 0: print \"- Copying anchors", "= GSPath() for n in thisPath.nodes: newNode = GSNode() newNode.type", "20), \"Include anchors\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_metrics = vanilla.CheckBox((15, 52+38,", "comp_into != 0: print \"- Cleaning out components in target", "-1 rotation = math.atan2(b, a) * (180/math.pi) return [scale_x, scale_y,", "by this value windowHeightResize = 0 # user can resize", "sender ): try: 
Glyphs.defaults[\"com.mekkablue.MasterFiller.include_components\"] = self.w.include_components.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] = self.w.include_anchors.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"]", "Doc = Glyphs.currentDocument selectedGlyphs = [ x.parent for x in", "self.w.master_into = vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.include_components", ") ): x = Glyphs.currentDocument.font.masters[i] myMasterList.append( '%i: %s' % (i,", "newPath = GSPath() for n in thisPath.nodes: newNode = GSNode()", "Glyphs.defaults[\"com.mekkablue.MasterFiller.include_anchors\"] = self.w.include_anchors.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] = self.w.include_metrics.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] = self.w.keep_window_open.get() except:", "Component: %s\" % ( compName ) targetLayer.components.append( newComp ) def", "newComp ) def copyAnchorsFromLayerToLayer( self, sourceLayer, targetLayer ): \"\"\"Copies all", "-100, 20), \"Include components\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.include_anchors = vanilla.CheckBox((15,", "str( thisComp.componentName ) # str() probably not necessary anymore, but", "-*- __doc__=\"\"\" Copies one master to another master in selected", "= len( targetLayer.anchors ) if anch_into != 0: print \"-", "copy metrics: if metrYesNo: self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer ) Font.enableUpdateInterface() except", "Layer to Layer # -*- coding: utf-8 -*- __doc__=\"\"\" Copies", "MasterChangeCallback( self, sender ): if self.w.master_from.get() == self.w.master_into.get(): self.w.copybutton.enable( False", "in sourceLayer.anchors: anchorName = thisAnchor.name anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y", "-100, 20), \"Include metrics\", sizeStyle='small', callback=self.SavePreferences, value=True) self.w.keep_window_open = vanilla.CheckBox((15,", "this value windowHeightResize = 0 # user can resize height", "= n.type newNode.connection = n.connection newNode.setPosition_( (n.x, n.y) ) newPath.addNode_(", "for thisGlyph in selectedGlyphs: try: print \"\\nProcessing\", thisGlyph.name sourcelayer =", "-15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback) self.w.include_components = vanilla.CheckBox((15, 52+2, -100,", "self.w.include_anchors.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.include_metrics\"] = self.w.include_metrics.get() Glyphs.defaults[\"com.mekkablue.MasterFiller.keep_window_open\"] = self.w.keep_window_open.get() except: return False", "= len( sourceLayer.components ) comp_into = len( targetLayer.components ) if", ") else: self.w.copybutton.enable( True ) def copyPathsFromLayerToLayer( self, sourceLayer, targetLayer", "\"\"\" import GlyphsApp import vanilla import math def getComponentScaleX_scaleY_rotation( self", "newComp = GSComponent( compName ) newComp.setPosition_( (thisComp.x, thisComp.y) ) ScaleX_scaleY_rotation", "= GSComponent( compName ) newComp.setPosition_( (thisComp.x, thisComp.y) ) ScaleX_scaleY_rotation =", "): Glyphs.clearLog() Glyphs.showMacroWindow() print \"Copy Layer to Layer Protocol:\" Font", "self.w.master_into.get(): self.w.copybutton.enable( False ) else: self.w.copybutton.enable( True ) def copyPathsFromLayerToLayer(", "# default window size \"Copy layer to layer\", # window", "= math.atan2(b, a) * (180/math.pi) return [scale_x, 
" ]

# Glyphs script: Copy layer to layer (com.mekkablue.MasterFiller)
# -*- coding: utf-8 -*-
__doc__="""
Copies one master to another master in selected glyphs.
"""

import GlyphsApp
import vanilla
import math

def getComponentScaleX_scaleY_rotation( self ):
	a = self.transform[0]
	b = self.transform[1]
	c = self.transform[2]
	d = self.transform[3]
	scale_x = math.sqrt(math.pow(a,2)+math.pow(b,2))
	scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2))
	if (b<0 and c<0):
		scale_y = scale_y * -1
	rotation = math.atan2(b, a) * (180/math.pi)
	return [scale_x, scale_y, rotation]

class MasterFiller( object ):
	# __init__: builds a vanilla.FloatingWindow (280 x 155, width resizable by 120 px, window title
	# "Copy layer to layer", autosaveName "com.mekkablue.MasterFiller.mainwindow") with
	# "Copy paths from" / "into selection of" master pop-ups, "Include components",
	# "Include anchors", "Include metrics" and keep-window-open checkboxes, and a default "Copy" button.
	# Defaults are registered via NSUserDefaults.standardUserDefaults().registerDefaults_() and restored
	# from Glyphs.defaults["com.mekkablue.MasterFiller.*"]; if LoadPreferences() fails it prints
	# "Note: 'Copy Layer to Layer' could not load preferences. Will resort to defaults.",
	# then opens the window, makes it key and preselects master 1 as the target (self.w.master_into.set(1)).
	#
	# SavePreferences( sender ): writes the checkbox states back into Glyphs.defaults.
	# The master list is built as '%i: %s' % (i, x.name) for every master of Glyphs.currentDocument.font,
	# and MasterChangeCallback( sender ) reacts to pop-up changes.
	#
	# copyPathsFromLayerToLayer( sourceLayer, targetLayer ): cleans out targetLayer.paths if needed, then
	# rebuilds every source path node by node with GSPath(), copying node type, connection and position.
	# copyComponentsFromLayerToLayer( sourceLayer, targetLayer ): removes existing components, then re-adds
	# each one by componentName and position, applying getComponentScaleX_scaleY_rotation() via
	# newComp.setScaleX_scaleY_rotation_().
	# copyAnchorsFromLayerToLayer( sourceLayer, targetLayer ): clears existing anchors and re-adds
	# GSAnchor( anchorName, anchorPosition ) for every source anchor, logging "-- %s (%i, %i)".
	# copyMetricsFromLayerToLayer( sourceLayer, targetLayer ): copies sourceLayer.width when it differs,
	# printing "- Copying width (%.1f)" or "- Width not changed".
	#
	# buttonCallback( sender ): clears the macro log, prints a protocol, collects the parents of
	# Font.selectedLayers, reads index_from/index_into and the checkbox flags, disables interface updates
	# and runs the four copy helpers from thisGlyph.layers[index_from] into thisGlyph.layers[index_into].
	...
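As a quick sanity check of the decomposition above, the following standalone sketch (not part of the original script; the sample matrix is made up) applies the same formulas to a plain tuple instead of a Glyphs component transform:

import math

def scale_and_rotation(transform):
    """Decompose a 2x2 affine matrix (a, b, c, d) with the same formulas as the script."""
    a, b, c, d = transform
    scale_x = math.sqrt(a * a + b * b)
    scale_y = math.sqrt(c * c + d * d)
    if b < 0 and c < 0:
        scale_y = -scale_y
    rotation = math.atan2(b, a) * (180 / math.pi)
    return scale_x, scale_y, rotation

# A component scaled by 2 and rotated by 30 degrees:
angle = math.radians(30)
matrix = (2 * math.cos(angle), 2 * math.sin(angle), -2 * math.sin(angle), 2 * math.cos(angle))
print(scale_and_rotation(matrix))  # approximately (2.0, 2.0, 30.0)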
<reponame>bjacobs1/vunit
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, <NAME> <EMAIL>

"""
Test of the general tokenizer
"""

from unittest import TestCase
from vunit.parsing.tokenizer import describe_location
from vunit.test.mock_2or3 import mock


class TestTokenizer(TestCase):
    """
    Test of the general tokenizer
    """

    def test_describes_single_char_location(self):
        self.assertEqual(
            _describe_location("""\
S
"""), """\
at filename0 line 1:
S
~""")

    def test_describes_single_char_location_within(self):
        self.assertEqual(
            _describe_location("""\
  S
"""), """\
at filename0 line 1:
  S
  ~""")

    def test_describes_multi_char_location(self):
        self.assertEqual(
            _describe_location("""\
S E
"""), """\
at filename0 line 1:
S E
~~~""")

    def test_describes_multi_char_location_within(self):
        self.assertEqual(
            _describe_location("""\
  S E
"""), """\
at filename0 line 1:
  S E
  ~~~""")

    def test_describes_multi_line_location(self):
        self.assertEqual(
            _describe_location("""\
S____
E
"""), """\
at filename0 line 1:
S____
~~~~~""")

    def test_describes_multi_file_location(self):
        self.assertEqual(
            _describe_location("""\

S__E""", """\


SE"""), """\
from filename0 line 2:
S__E
~~~~
at filename1 line 3:
SE
~~""")

    def test_describe_location_none(self):
        self.assertEqual(describe_location(None), "Unknown location")

    def test_describe_missing_location(self):
        self.assertEqual(describe_location((("missing.svh", (0, 0)), None)),
                         "Unknown location in missing.svh")

    def test_describe_none_filename_location(self):
        self.assertEqual(describe_location(((None, (0, 0)), None)),
                         "Unknown Python string")


def _describe_location(*codes):
    """
    Helper to test describe_location
    """
    contents = {}
    location = None
    for idx, code in enumerate(codes):
        filename = "filename%i" % idx
        contents[filename] = code

        start = code.index("S")
        if "E" in code:
            end = code.index("E")
        else:
            end = start

        location = ((filename, (start, end)), location)

    with mock.patch("vunit.parsing.tokenizer.read_file", autospec=True) as mock_read_file:
        with mock.patch("vunit.parsing.tokenizer.file_exists", autospec=True) as mock_file_exists:
            def file_exists_side_effect(filename):
                return filename in contents

            def read_file_side_effect(filename):
                return contents[filename]

            mock_file_exists.side_effect = file_exists_side_effect
            mock_read_file.side_effect = read_file_side_effect

            retval = describe_location(location=location)
            return retval
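For readers unfamiliar with VUnit internals, the location value that the helper builds is just a nested tuple. The sketch below is standalone (the filenames and offsets are invented) and only illustrates the shape ((filename, (start, end)), previous_location) that the tests exercise:

# previous_location chains back to the location this one originated from, or None.
inner = (("filename0", (1, 4)), None)   # character offsets 1..4 in filename0
outer = (("filename1", (2, 3)), inner)  # offsets 2..3 in filename1, chained to the first location

def chain(location):
    """Yield (filename, (start, end)) pairs from the outermost location inwards."""
    while location is not None:
        pos, location = location
        yield pos

print(list(chain(outer)))  # [('filename1', (2, 3)), ('filename0', (1, 4))]

With readable files behind those names, describe_location reports the chained entry with a "from ..." block and the head with an "at ..." block, which is exactly what test_describes_multi_file_location asserts.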
<filename>modules/star_se_SP.py
# STAR aligner, single-end mode, second pass
#
# This module runs the second pass of the STAR aligner 2-pass
# strategy. The goal is to align reads taking into account the splice
# junctions found in the first pass.
#
# Inputs:
# - sample_trim.fastq.gz
# - splicing junction files (.tab)
#
# Output:
# - aligned reads
# - logs for follow-up and debugging if needed
#
# Parameters:
# No fancy parameters needed, only the threads number is specified.

rule star_se_SP:
    input:
        # fake input
        flag = ancient(config["REF"] + "REindexing_done.txt"),
        R1 = config["TRIMMED"] + "{samples}_trim.fastq.gz",
        genomeDir = ancient(config["REF"])
    output:
        bam = config["MAP"] + "{samples}_sorted.bam.gz"
    params:
        prefix = config["MAP"] + "{samples}.",
        tmp = config["MAP"] + "SP/" + "{samples}_sp_STAR_TMP",
        bind = config["BIND"],
        cont = config["CONT"]
    benchmark:
        "benchmarks/star_SP/{samples}.tsv"
    message:
        "Running STAR second pass with {input.R1}. \n"
    shell:
        """
        singularity exec -B {params.bind} {params.cont} \
        STAR \
        --runThreadN 10 \
        --genomeDir {input.genomeDir} \
        --readFilesIn {input.R1} \
        --outSAMtype BAM SortedByCoordinate \
        --outFileNamePrefix {params.prefix} \
        --outStd BAM_SortedByCoordinate \
        --outTmpDir {params.tmp} \
        --scoreGap 0 \
        --scoreGapNoncan -8 \
        --scoreGapGCAG -4 \
        --scoreGapATAC -8 \
        --scoreGenomicLengthLog2scale -0.25 \
        --scoreDelOpen -2 \
        --scoreDelBase -2 \
        --scoreInsOpen -2 \
        --scoreInsBase -2 \
        --scoreStitchSJshift 1 \
        --readFilesCommand zcat | gzip --stdout > {output.bam}
        """
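The rule only reads paths out of the Snakemake config object. A minimal config sketch that would satisfy the keys referenced above could look like the following; the directory and image names are invented, only the keys REF, TRIMMED, MAP, BIND and CONT come from the rule itself:

# Hypothetical values for illustration only.
config = {
    "REF":     "ref/star_index/",      # genome index directory, also holds REindexing_done.txt
    "TRIMMED": "results/trimmed/",     # location of the {samples}_trim.fastq.gz inputs
    "MAP":     "results/mapped/",      # where {samples}_sorted.bam.gz and the SP/ temp dirs are written
    "BIND":    "/data",                # host path bind-mounted into the container (singularity -B)
    "CONT":    "containers/star.sif",  # Singularity image that provides STAR
}

In a real workflow these values would normally live in a YAML file passed to snakemake with --configfile rather than being defined inline.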
from django.db import models


class JobOffer(models.Model):
    company_name = models.CharField(max_length=50)
    company_email = models.EmailField()
    job_title = models.CharField(max_length=60)
    job_description = models.TextField()
    salary = models.PositiveIntegerField()
    city = models.CharField(max_length=35)
    state = models.CharField(max_length=35)
    created_at = models.DateField(auto_now_add=True)
    available = models.BooleanField(default=True)

    def __str__(self):
        return self.company_name
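Assuming the model above belongs to an installed app (called "myapp" here purely as a placeholder), typical ORM usage against it looks like this; the sample field values are made up:

from myapp.models import JobOffer  # "myapp" is a placeholder for the app that defines the model

# Create an offer; created_at is filled automatically because of auto_now_add=True.
offer = JobOffer.objects.create(
    company_name="Acme",
    company_email="jobs@acme.example",
    job_title="Backend developer",
    job_description="Build APIs.",
    salary=90000,
    city="Nairobi",
    state="Nairobi",
)

# Offers still open in a given city, newest first; __str__ returns company_name.
open_offers = JobOffer.objects.filter(available=True, city__iexact="nairobi").order_by("-created_at")
print([str(o) for o in open_offers])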
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import messages
from .forms import PictureUploadForm, CommentForm
from .models import Image, Profile, Likes, Comments
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm
from datetime import datetime


def index(request):
    images = Image.objects.all()
    context = {'images': images}
    return render(request, 'memeapp/index.html', context)


def registerPage(request):
    form = UserCreationForm()
    if request.method == "POST":
        form_results = UserCreationForm(request.POST)
        if form_results.is_valid():
            user = form_results.save(commit=False)
            user.username = user.username.lower()
            user.save()
            login(request, user)
            return redirect('index')
        else:
            messages.error(request, 'Error occurred during registration')
    context = {'reg_form': form}
    return render(request, 'memeapp/auth.html', context)


def loginPage(request):
    page = 'login'
    if request.user.is_authenticated:
        return redirect('index')
    if request.method == "POST":
        username = request.POST.get('username').lower()
        password = request.POST.get('password')
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            messages.error(request, 'User does not exist')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('index')
        else:
            messages.error(request, 'Username OR Password does not exist')
    context = {'page': page}
    return render(request, 'memeapp/auth.html', context)


def logoutUser(request):
    logout(request)
    return redirect('index')


@login_required(login_url='login')
def uploadPicture(request):
    form = PictureUploadForm()
    if request.method == "POST":
        form_results = PictureUploadForm(request.POST, request.FILES)
        if form_results.is_valid():
            form_results.save()
            return redirect('index')
    context = {"form": form}
    return render(request, 'memeapp/upload_picture.html', context)


@login_required(login_url='login')
def my_images(request):
    current_user = request.user
    images = Profile.objects.filter(user_id=current_user.id).first()
    profiles = Image.objects.filter(user_id=current_user.id)
    return render(request, 'memeapp/profile.html', {"profile": images, "images": profiles})


@login_required(login_url='login')
def each_image(request, id):
    image = Image.objects.get(id=id)
    return render(request, 'memeapp/image_details.html', {'image': image})


@login_required(login_url='login')
def like_picture(request, id):
    likes = Likes.objects.filter(image_id=id).first()
    if Likes.objects.filter(image_id=id, user_id=request.user.id).exists():
        likes.delete()
        image = Image.objects.get(id=id)
        if image.likes_number == 0:
            image.likes_number = 0
            image.save()
        else:
            image.likes_number -= 1
            image.save()
        return redirect('/')
    else:
        likes = Likes(image_id=id, user_id=request.user.id)
        likes.save()
        image = Image.objects.get(id=id)
        image.likes_number = image.likes_number + 1
        image.save()
        return redirect('/')


@login_required(login_url='login')
def comment(request, pk):
    profile = Image.objects.get(pk=pk)
    form_results = CommentForm(request.POST, instance=profile)
    if request.method == "POST":
        if form_results.is_valid():
            user = request.user
            comment = form_results.cleaned_data['comment']
            comment_content = Comments(user=user, image=profile, comment=comment, created_on=datetime.now())
            comment_content.save()
            profile.comments_number = profile.comments_number + 1
            profile.save()
            return redirect('index')
        else:
            print('form is invalid')
    else:
        form_results = CommentForm
    context = {'form': form_results, 'image': profile}
    return render(request, 'memeapp/comments.html', context)


def search(request):
    title = "Search"
    if 'search_query' in request.GET and request.GET["search_query"]:
        search_term = request.GET.get("search_query").lower()
        searched_results = Image.search_image(search_term)
        message = f"{search_term}"
        context = {'message': message, 'results': searched_results, 'title': title}
        return render(request, 'memeapp/search.html', context)
    else:
        messages.error(request, "You haven't searched for any term")
        message = "You haven't searched for any term"
= {'reg_form':form} return render(request, 'memeapp/auth.html',context) def loginPage(request):", "from .models import Image,Profile,Likes,Comments from django.contrib.auth.decorators import login_required from django.contrib", "import authenticate,login,logout from django.contrib.auth.forms import UserCreationForm from datetime import datetime", "context = {'form':form_results,'image':profile} return render(request,'memeapp/comments.html',context) def search(request): title = \"Search\"", "return redirect('index') else: messages.error(request, 'Username OR Password does not exist')", "login(request,user) return redirect('index') else: messages.error(request, 'Username OR Password does not", "def uploadPicture(request): form = PictureUploadForm() if request.method == \"POST\": form_results", "import Image,Profile,Likes,Comments from django.contrib.auth.decorators import login_required from django.contrib .auth import", "request.method == \"POST\": form_results = PictureUploadForm(request.POST,request.FILES) if form_results.is_valid(): form_results.save() return", "form=UserCreationForm() if request.method == \"POST\": form_results=UserCreationForm(request.POST) if form_results.is_valid(): user =form_results.save(commit=False)", "'memeapp/auth.html', context) def logoutUser(request): logout(request) return redirect('index') @login_required(login_url='login') def uploadPicture(request):", "redirect('/') @login_required(login_url='login') def comment(request,pk): profile = Image.objects.get(pk=pk) form_results = CommentForm(request.POST,instance=profile)", "registerPage(request): form=UserCreationForm() if request.method == \"POST\": form_results=UserCreationForm(request.POST) if form_results.is_valid(): user", "context = {\"form\": form} return render(request, 'memeapp/upload_picture.html', context) @login_required(login_url='login') def", "= request.user images = Profile.objects.filter(user_id=current_user.id).first() profiles = Image.objects.filter(user_id=current_user.id) return render(request,", "context = {'message': message, 'results': searched_results, 'title': title} return render(request,", "\"POST\": if form_results.is_valid(): user = request.user comment= form_results.cleaned_data['comment'] comment_content =", "profile.save() return redirect('index') else: print('form is invalid') else: form_results =", "{'reg_form':form} return render(request, 'memeapp/auth.html',context) def loginPage(request): page='login' if request.user.is_authenticated: return", "User from django.contrib import messages from .forms import PictureUploadForm,CommentForm from", "def search(request): title = \"Search\" if 'search_query' in request.GET and", "haven't searched for any term\") message = \"You haven't searched", "occured during registration') context = {'reg_form':form} return render(request, 'memeapp/auth.html',context) def", "return render(request, 'memeapp/image_details.html', {'image': image}) @login_required(login_url='login') def like_picture(request, id): likes", "= Image.objects.get(id=id) if image.likes_number == 0: image.likes_number = 0 image.save()", "user_id=request.user.id) likes.save() image = Image.objects.get(id=id) image.likes_number = image.likes_number + 1", "Image,Profile,Likes,Comments from django.contrib.auth.decorators import login_required from django.contrib .auth import authenticate,login,logout", "id): likes = Likes.objects.filter(image_id=id).first() if Likes.objects.filter(image_id=id, user_id=request.user.id).exists(): likes.delete() image =", 
"Image.objects.filter(user_id=current_user.id) return render(request, 'memeapp/profile.html', {\"profile\": images,\"images\":profiles}) @login_required(login_url='login') def each_image(request, id):", "render(request,'memeapp/comments.html',context) def search(request): title = \"Search\" if 'search_query' in request.GET", "is not None: login(request,user) return redirect('index') else: messages.error(request, 'Username OR", "= PictureUploadForm(request.POST,request.FILES) if form_results.is_valid(): form_results.save() return redirect('index') context = {\"form\":", "image = Image.objects.get(id=id) image.likes_number = image.likes_number + 1 image.save() return", "if 'search_query' in request.GET and request.GET[\"search_query\"]: search_term = request.GET.get(\"search_query\").lower() searched_results", "user=User.objects.get(username=username) except: messages.error(request, 'User does not exist') user=authenticate(request,username=username,password=password) if user", "does not exist') user=authenticate(request,username=username,password=password) if user is not None: login(request,user)", "invalid') else: form_results = CommentForm context = {'form':form_results,'image':profile} return render(request,'memeapp/comments.html',context)", "redirect('index') context = {\"form\": form} return render(request, 'memeapp/upload_picture.html', context) @login_required(login_url='login')", "= request.GET.get(\"search_query\").lower() searched_results = Image.search_image(search_term) message = f\"{search_term}\" context =", "messages.error(request, 'Username OR Password does not exist') context={'page':page} return render(request,", "{\"form\": form} return render(request, 'memeapp/upload_picture.html', context) @login_required(login_url='login') def my_images(request): current_user", "render(request, 'memeapp/image_details.html', {'image': image}) @login_required(login_url='login') def like_picture(request, id): likes =", "form_results = PictureUploadForm(request.POST,request.FILES) if form_results.is_valid(): form_results.save() return redirect('index') context =", "from django.shortcuts import render,redirect from django.contrib.auth.models import User from django.contrib", "== \"POST\": if form_results.is_valid(): user = request.user comment= form_results.cleaned_data['comment'] comment_content", "else: image.likes_number -= 1 image.save() return redirect('/') else: likes =", "Image.objects.get(id=id) return render(request, 'memeapp/image_details.html', {'image': image}) @login_required(login_url='login') def like_picture(request, id):", "\"POST\": username=request.POST.get('username').lower() password=request.POST.get('password') try: user=User.objects.get(username=username) except: messages.error(request, 'User does not", "render(request, 'memeapp/upload_picture.html', context) @login_required(login_url='login') def my_images(request): current_user = request.user images", "image}) @login_required(login_url='login') def like_picture(request, id): likes = Likes.objects.filter(image_id=id).first() if Likes.objects.filter(image_id=id,", "= Image.objects.get(id=id) return render(request, 'memeapp/image_details.html', {'image': image}) @login_required(login_url='login') def like_picture(request,", "exist') user=authenticate(request,username=username,password=password) if user is not None: login(request,user) return redirect('index')", "\"POST\": form_results = PictureUploadForm(request.POST,request.FILES) if form_results.is_valid(): form_results.save() return redirect('index') context", "if 
user is not None: login(request,user) return redirect('index') else: messages.error(request,", "in request.GET and request.GET[\"search_query\"]: search_term = request.GET.get(\"search_query\").lower() searched_results = Image.search_image(search_term)", "from datetime import datetime def index(request): images=Image.objects.all() context={'images':images} return render(request,'memeapp/index.html',context)", "not None: login(request,user) return redirect('index') else: messages.error(request, 'Username OR Password", "user.username=user.username.lower() user.save() login(request,user) return redirect('index') else: messages.error(request, 'Error occured during", "comment_content = Comments(user=user, image=profile, comment=comment, created_on=datetime.now()) comment_content.save() profile.comments_number = profile.comments_number", "request.user.is_authenticated: return redirect('index') if request.method == \"POST\": username=request.POST.get('username').lower() password=request.POST.get('password') try:", "searched for any term\") message = \"You haven't searched for", "request.user comment= form_results.cleaned_data['comment'] comment_content = Comments(user=user, image=profile, comment=comment, created_on=datetime.now()) comment_content.save()", "return redirect('index') @login_required(login_url='login') def uploadPicture(request): form = PictureUploadForm() if request.method", "if request.method == \"POST\": if form_results.is_valid(): user = request.user comment=", "title = \"Search\" if 'search_query' in request.GET and request.GET[\"search_query\"]: search_term", "1 image.save() return redirect('/') @login_required(login_url='login') def comment(request,pk): profile = Image.objects.get(pk=pk)", "from .forms import PictureUploadForm,CommentForm from .models import Image,Profile,Likes,Comments from django.contrib.auth.decorators", "if request.method == \"POST\": form_results=UserCreationForm(request.POST) if form_results.is_valid(): user =form_results.save(commit=False) user.username=user.username.lower()", "except: messages.error(request, 'User does not exist') user=authenticate(request,username=username,password=password) if user is", "\"POST\": form_results=UserCreationForm(request.POST) if form_results.is_valid(): user =form_results.save(commit=False) user.username=user.username.lower() user.save() login(request,user) return", "@login_required(login_url='login') def uploadPicture(request): form = PictureUploadForm() if request.method == \"POST\":", "like_picture(request, id): likes = Likes.objects.filter(image_id=id).first() if Likes.objects.filter(image_id=id, user_id=request.user.id).exists(): likes.delete() image", "image.likes_number = image.likes_number + 1 image.save() return redirect('/') @login_required(login_url='login') def", "0 image.save() else: image.likes_number -= 1 image.save() return redirect('/') else:", "datetime def index(request): images=Image.objects.all() context={'images':images} return render(request,'memeapp/index.html',context) def registerPage(request): form=UserCreationForm()", "PictureUploadForm() if request.method == \"POST\": form_results = PictureUploadForm(request.POST,request.FILES) if form_results.is_valid():", "form_results.is_valid(): form_results.save() return redirect('index') context = {\"form\": form} return render(request,", "'Error occured during registration') context = {'reg_form':form} return render(request, 'memeapp/auth.html',context)", "context) else: messages.error(request, \"You haven't searched for any term\") message", 
"CommentForm context = {'form':form_results,'image':profile} return render(request,'memeapp/comments.html',context) def search(request): title =", "if form_results.is_valid(): form_results.save() return redirect('index') context = {\"form\": form} return", "else: messages.error(request, 'Username OR Password does not exist') context={'page':page} return", "redirect('/') else: likes = Likes(image_id=id, user_id=request.user.id) likes.save() image = Image.objects.get(id=id)", "= {\"form\": form} return render(request, 'memeapp/upload_picture.html', context) @login_required(login_url='login') def my_images(request):", "import render,redirect from django.contrib.auth.models import User from django.contrib import messages", ".forms import PictureUploadForm,CommentForm from .models import Image,Profile,Likes,Comments from django.contrib.auth.decorators import", "for any term\") message = \"You haven't searched for any", "else: messages.error(request, \"You haven't searched for any term\") message =", "from django.contrib.auth.forms import UserCreationForm from datetime import datetime def index(request):", "user =form_results.save(commit=False) user.username=user.username.lower() user.save() login(request,user) return redirect('index') else: messages.error(request, 'Error", "import datetime def index(request): images=Image.objects.all() context={'images':images} return render(request,'memeapp/index.html',context) def registerPage(request):", "= CommentForm(request.POST,instance=profile) if request.method == \"POST\": if form_results.is_valid(): user =", "= Comments(user=user, image=profile, comment=comment, created_on=datetime.now()) comment_content.save() profile.comments_number = profile.comments_number +", "print('form is invalid') else: form_results = CommentForm context = {'form':form_results,'image':profile}", "return redirect('index') else: print('form is invalid') else: form_results = CommentForm", "'memeapp/auth.html',context) def loginPage(request): page='login' if request.user.is_authenticated: return redirect('index') if request.method", "== \"POST\": form_results=UserCreationForm(request.POST) if form_results.is_valid(): user =form_results.save(commit=False) user.username=user.username.lower() user.save() login(request,user)", "message, 'results': searched_results, 'title': title} return render(request, 'memeapp/search.html', context) else:", "return render(request, 'memeapp/profile.html', {\"profile\": images,\"images\":profiles}) @login_required(login_url='login') def each_image(request, id): image", "Likes(image_id=id, user_id=request.user.id) likes.save() image = Image.objects.get(id=id) image.likes_number = image.likes_number +", "return redirect('/') else: likes = Likes(image_id=id, user_id=request.user.id) likes.save() image =", "return render(request, 'memeapp/auth.html',context) def loginPage(request): page='login' if request.user.is_authenticated: return redirect('index')", "def loginPage(request): page='login' if request.user.is_authenticated: return redirect('index') if request.method ==", "likes = Likes.objects.filter(image_id=id).first() if Likes.objects.filter(image_id=id, user_id=request.user.id).exists(): likes.delete() image = Image.objects.get(id=id)", ".models import Image,Profile,Likes,Comments from django.contrib.auth.decorators import login_required from django.contrib .auth", "redirect('index') @login_required(login_url='login') def uploadPicture(request): form = PictureUploadForm() if request.method ==", "username=request.POST.get('username').lower() 
password=request.POST.get('password') try: user=User.objects.get(username=username) except: messages.error(request, 'User does not exist')", "+ 1 profile.save() return redirect('index') else: print('form is invalid') else:", "context) def logoutUser(request): logout(request) return redirect('index') @login_required(login_url='login') def uploadPicture(request): form", "Image.objects.get(pk=pk) form_results = CommentForm(request.POST,instance=profile) if request.method == \"POST\": if form_results.is_valid():", "django.shortcuts import render,redirect from django.contrib.auth.models import User from django.contrib import", "= Likes.objects.filter(image_id=id).first() if Likes.objects.filter(image_id=id, user_id=request.user.id).exists(): likes.delete() image = Image.objects.get(id=id) if", "import login_required from django.contrib .auth import authenticate,login,logout from django.contrib.auth.forms import", "request.method == \"POST\": form_results=UserCreationForm(request.POST) if form_results.is_valid(): user =form_results.save(commit=False) user.username=user.username.lower() user.save()", "= image.likes_number + 1 image.save() return redirect('/') @login_required(login_url='login') def comment(request,pk):", "'User does not exist') user=authenticate(request,username=username,password=password) if user is not None:", "= 0 image.save() else: image.likes_number -= 1 image.save() return redirect('/')", "django.contrib import messages from .forms import PictureUploadForm,CommentForm from .models import", "user is not None: login(request,user) return redirect('index') else: messages.error(request, 'Username", "\"You haven't searched for any term\" return render(request, 'memeapp/search.html', {\"message\":", "-= 1 image.save() return redirect('/') else: likes = Likes(image_id=id, user_id=request.user.id)", "image=profile, comment=comment, created_on=datetime.now()) comment_content.save() profile.comments_number = profile.comments_number + 1 profile.save()", "Comments(user=user, image=profile, comment=comment, created_on=datetime.now()) comment_content.save() profile.comments_number = profile.comments_number + 1", "not exist') user=authenticate(request,username=username,password=password) if user is not None: login(request,user) return", "'search_query' in request.GET and request.GET[\"search_query\"]: search_term = request.GET.get(\"search_query\").lower() searched_results =", "messages from .forms import PictureUploadForm,CommentForm from .models import Image,Profile,Likes,Comments from", "context={'images':images} return render(request,'memeapp/index.html',context) def registerPage(request): form=UserCreationForm() if request.method == \"POST\":", "uploadPicture(request): form = PictureUploadForm() if request.method == \"POST\": form_results =", "messages.error(request, 'Error occured during registration') context = {'reg_form':form} return render(request,", "Image.search_image(search_term) message = f\"{search_term}\" context = {'message': message, 'results': searched_results,", "search(request): title = \"Search\" if 'search_query' in request.GET and request.GET[\"search_query\"]:", "haven't searched for any term\" return render(request, 'memeapp/search.html', {\"message\": message})", "= CommentForm context = {'form':form_results,'image':profile} return render(request,'memeapp/comments.html',context) def search(request): title", "@login_required(login_url='login') def my_images(request): current_user = request.user images = Profile.objects.filter(user_id=current_user.id).first() 
profiles", "= PictureUploadForm() if request.method == \"POST\": form_results = PictureUploadForm(request.POST,request.FILES) if", "= \"You haven't searched for any term\" return render(request, 'memeapp/search.html',", "user = request.user comment= form_results.cleaned_data['comment'] comment_content = Comments(user=user, image=profile, comment=comment,", "else: likes = Likes(image_id=id, user_id=request.user.id) likes.save() image = Image.objects.get(id=id) image.likes_number", "message = f\"{search_term}\" context = {'message': message, 'results': searched_results, 'title':", "redirect('index') if request.method == \"POST\": username=request.POST.get('username').lower() password=request.POST.get('password') try: user=User.objects.get(username=username) except:", "Likes.objects.filter(image_id=id).first() if Likes.objects.filter(image_id=id, user_id=request.user.id).exists(): likes.delete() image = Image.objects.get(id=id) if image.likes_number", "import User from django.contrib import messages from .forms import PictureUploadForm,CommentForm", "== \"POST\": form_results = PictureUploadForm(request.POST,request.FILES) if form_results.is_valid(): form_results.save() return redirect('index')", "@login_required(login_url='login') def comment(request,pk): profile = Image.objects.get(pk=pk) form_results = CommentForm(request.POST,instance=profile) if", "images = Profile.objects.filter(user_id=current_user.id).first() profiles = Image.objects.filter(user_id=current_user.id) return render(request, 'memeapp/profile.html', {\"profile\":", "from django.contrib.auth.decorators import login_required from django.contrib .auth import authenticate,login,logout from", "import messages from .forms import PictureUploadForm,CommentForm from .models import Image,Profile,Likes,Comments", "messages.error(request, 'User does not exist') user=authenticate(request,username=username,password=password) if user is not", "does not exist') context={'page':page} return render(request, 'memeapp/auth.html', context) def logoutUser(request):", "@login_required(login_url='login') def like_picture(request, id): likes = Likes.objects.filter(image_id=id).first() if Likes.objects.filter(image_id=id, user_id=request.user.id).exists():", "likes.delete() image = Image.objects.get(id=id) if image.likes_number == 0: image.likes_number =", "Image.objects.get(id=id) image.likes_number = image.likes_number + 1 image.save() return redirect('/') @login_required(login_url='login')", "'results': searched_results, 'title': title} return render(request, 'memeapp/search.html', context) else: messages.error(request,", "= profile.comments_number + 1 profile.save() return redirect('index') else: print('form is", "+ 1 image.save() return redirect('/') @login_required(login_url='login') def comment(request,pk): profile =", "image.likes_number == 0: image.likes_number = 0 image.save() else: image.likes_number -=", "and request.GET[\"search_query\"]: search_term = request.GET.get(\"search_query\").lower() searched_results = Image.search_image(search_term) message =", "else: form_results = CommentForm context = {'form':form_results,'image':profile} return render(request,'memeapp/comments.html',context) def", "def index(request): images=Image.objects.all() context={'images':images} return render(request,'memeapp/index.html',context) def registerPage(request): form=UserCreationForm() if", "if form_results.is_valid(): user =form_results.save(commit=False) user.username=user.username.lower() user.save() login(request,user) return redirect('index') 
else:", "comment= form_results.cleaned_data['comment'] comment_content = Comments(user=user, image=profile, comment=comment, created_on=datetime.now()) comment_content.save() profile.comments_number", "created_on=datetime.now()) comment_content.save() profile.comments_number = profile.comments_number + 1 profile.save() return redirect('index')", "def each_image(request, id): image = Image.objects.get(id=id) return render(request, 'memeapp/image_details.html', {'image':", "'Username OR Password does not exist') context={'page':page} return render(request, 'memeapp/auth.html',", "profile.comments_number + 1 profile.save() return redirect('index') else: print('form is invalid')", "\"Search\" if 'search_query' in request.GET and request.GET[\"search_query\"]: search_term = request.GET.get(\"search_query\").lower()", "django.contrib.auth.decorators import login_required from django.contrib .auth import authenticate,login,logout from django.contrib.auth.forms", "Image.objects.get(id=id) if image.likes_number == 0: image.likes_number = 0 image.save() else:", "id): image = Image.objects.get(id=id) return render(request, 'memeapp/image_details.html', {'image': image}) @login_required(login_url='login')", "0: image.likes_number = 0 image.save() else: image.likes_number -= 1 image.save()", "image.save() return redirect('/') else: likes = Likes(image_id=id, user_id=request.user.id) likes.save() image", "return render(request, 'memeapp/upload_picture.html', context) @login_required(login_url='login') def my_images(request): current_user = request.user", "None: login(request,user) return redirect('index') else: messages.error(request, 'Username OR Password does", "profiles = Image.objects.filter(user_id=current_user.id) return render(request, 'memeapp/profile.html', {\"profile\": images,\"images\":profiles}) @login_required(login_url='login') def", "django.contrib .auth import authenticate,login,logout from django.contrib.auth.forms import UserCreationForm from datetime", "image = Image.objects.get(id=id) return render(request, 'memeapp/image_details.html', {'image': image}) @login_required(login_url='login') def", "django.contrib.auth.models import User from django.contrib import messages from .forms import", "image.likes_number = 0 image.save() else: image.likes_number -= 1 image.save() return", "{'form':form_results,'image':profile} return render(request,'memeapp/comments.html',context) def search(request): title = \"Search\" if 'search_query'", "request.method == \"POST\": username=request.POST.get('username').lower() password=request.POST.get('password') try: user=User.objects.get(username=username) except: messages.error(request, 'User", "index(request): images=Image.objects.all() context={'images':images} return render(request,'memeapp/index.html',context) def registerPage(request): form=UserCreationForm() if request.method", "import PictureUploadForm,CommentForm from .models import Image,Profile,Likes,Comments from django.contrib.auth.decorators import login_required", "def registerPage(request): form=UserCreationForm() if request.method == \"POST\": form_results=UserCreationForm(request.POST) if form_results.is_valid():", "messages.error(request, \"You haven't searched for any term\") message = \"You", "= Profile.objects.filter(user_id=current_user.id).first() profiles = Image.objects.filter(user_id=current_user.id) return render(request, 'memeapp/profile.html', {\"profile\": images,\"images\":profiles})", "'memeapp/image_details.html', {'image': image}) @login_required(login_url='login') def 
like_picture(request, id): likes = Likes.objects.filter(image_id=id).first()", "user=authenticate(request,username=username,password=password) if user is not None: login(request,user) return redirect('index') else:", "== 0: image.likes_number = 0 image.save() else: image.likes_number -= 1", "if request.user.is_authenticated: return redirect('index') if request.method == \"POST\": username=request.POST.get('username').lower() password=request.POST.get('password')", "= Likes(image_id=id, user_id=request.user.id) likes.save() image = Image.objects.get(id=id) image.likes_number = image.likes_number", "return redirect('/') @login_required(login_url='login') def comment(request,pk): profile = Image.objects.get(pk=pk) form_results =", "def logoutUser(request): logout(request) return redirect('index') @login_required(login_url='login') def uploadPicture(request): form =", "form_results.is_valid(): user = request.user comment= form_results.cleaned_data['comment'] comment_content = Comments(user=user, image=profile,", "render(request, 'memeapp/profile.html', {\"profile\": images,\"images\":profiles}) @login_required(login_url='login') def each_image(request, id): image =", "if Likes.objects.filter(image_id=id, user_id=request.user.id).exists(): likes.delete() image = Image.objects.get(id=id) if image.likes_number ==", "image.save() else: image.likes_number -= 1 image.save() return redirect('/') else: likes", "image.likes_number + 1 image.save() return redirect('/') @login_required(login_url='login') def comment(request,pk): profile", "comment_content.save() profile.comments_number = profile.comments_number + 1 profile.save() return redirect('index') else:", "{'message': message, 'results': searched_results, 'title': title} return render(request, 'memeapp/search.html', context)", "Password does not exist') context={'page':page} return render(request, 'memeapp/auth.html', context) def", "profile.comments_number = profile.comments_number + 1 profile.save() return redirect('index') else: print('form", "message = \"You haven't searched for any term\" return render(request,", "= {'message': message, 'results': searched_results, 'title': title} return render(request, 'memeapp/search.html',", "def comment(request,pk): profile = Image.objects.get(pk=pk) form_results = CommentForm(request.POST,instance=profile) if request.method", "form_results.is_valid(): user =form_results.save(commit=False) user.username=user.username.lower() user.save() login(request,user) return redirect('index') else: messages.error(request,", "images,\"images\":profiles}) @login_required(login_url='login') def each_image(request, id): image = Image.objects.get(id=id) return render(request,", "'memeapp/search.html', context) else: messages.error(request, \"You haven't searched for any term\")", "searched_results = Image.search_image(search_term) message = f\"{search_term}\" context = {'message': message,", "def my_images(request): current_user = request.user images = Profile.objects.filter(user_id=current_user.id).first() profiles =" ]
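The views above redirect to named routes such as 'index' and 'login' and protect several endpoints with login_required(login_url='login'), so the project needs URL patterns with matching names. A minimal memeapp/urls.py consistent with those names might look like the sketch below; the concrete path strings are illustrative assumptions, not taken from the original project.

# Hypothetical URL wiring for the views above (route names mirror the
# redirect()/login_required targets used in views.py; paths are assumed).
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('register/', views.registerPage, name='register'),
    path('login/', views.loginPage, name='login'),
    path('logout/', views.logoutUser, name='logout'),
    path('upload/', views.uploadPicture, name='upload'),
    path('profile/', views.my_images, name='profile'),
    path('image/<int:id>/', views.each_image, name='image'),
    path('like/<int:id>/', views.like_picture, name='like'),
    path('comment/<int:pk>/', views.comment, name='comment'),
    path('search/', views.search, name='search'),
]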
"""... annotation."""

import logging
import pickle
import xml.etree.ElementTree as etree

import sparv.util as util
from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder

log = logging.getLogger(__name__)

PART_DELIM1 = "^1"


# @annotator("Diapivot annotation", language=["swe-1800"])
def diapivot_annotate(out: Output = Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams"),
                      lemgram: Annotation = Annotation("<token>:saldo.lemgram"),
                      model: Model = Model("hist/diapivot.pickle")):
    """Annotate each lemgram with its corresponding saldo_id according to model.

    Args:
        out (str, optional): Resulting annotation file.
            Defaults to Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams").
        lemgram (str, optional): Existing lemgram annotation. Defaults to Annotation("<token>:saldo.lemgram").
        model (str, optional): Crosslink model. Defaults to Model("hist/diapivot.pickle").
    """
    lexicon = PivotLexicon(model)
    lemgram_annotation = list(lemgram.read())

    out_annotation = []

    for lemgrams in lemgram_annotation:
        saldo_ids = []
        for lemgram in lemgrams.split(util.DELIM):
            s_i = lexicon.get_exactMatch(lemgram)
            if s_i:
                saldo_ids += [s_i]
        out_annotation.append(util.AFFIX + util.DELIM.join(set(saldo_ids)) + util.AFFIX if saldo_ids else util.AFFIX)

    out.write(out_annotation)


# @modelbuilder("Diapivot model", language=["swe"])
def build_diapivot(out: ModelOutput = ModelOutput("hist/diapivot.pickle")):
    """Download diapivot XML dictionary and save as a pickle file."""
    # Download diapivot.xml
    xml_model = Model("hist/diapivot.xml")
    xml_model.download("https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml")

    # Create pickle file
    xml_lexicon = read_xml(xml_model.path)
    log.info("Saving cross lexicon in Pickle format")
    picklex = {}
    for lem in xml_lexicon:
        lemgrams = []
        for saldo, match in list(xml_lexicon[lem].items()):
            lemgrams.append(PART_DELIM1.join([saldo, match]))
        picklex[lem] = sorted(lemgrams)

    out.write_pickle(picklex)

    # Clean up
    xml_model.remove()


################################################################################
# Auxiliaries
################################################################################


class PivotLexicon:
    """A lexicon for old Swedish SALDO lookups.

    It is initialized from a pickled file.
    """

    def __init__(self, crossfile, verbose=True):
        """Read pickled lexicon."""
        if verbose:
            log.info("Reading cross lexicon: %s", crossfile)
        with open(crossfile, "rb") as F:
            self.lexicon = pickle.load(F)
        if verbose:
            log.info("OK, read %d words", len(self.lexicon))

    def lookup(self, lem):
        """Lookup a word in the lexicon."""
        if lem.lower() == lem:
            annotation_tag_pairs = self.lexicon.get(lem, [])
        else:
            annotation_tag_pairs = self.lexicon.get(lem, []) + self.lexicon.get(lem.lower(), [])
        return list(map(_split_val, annotation_tag_pairs))

    def get_exactMatch(self, word):
        """Get only exact matches from lexicon."""
        s = self.lookup(word)
        if s and s[0] == "exactMatch":
            return s[1]


def _split_val(key_val):
    return key_val.rsplit(PART_DELIM1)[1]


def read_xml(xml):
    """Read the XML version of crosslinked lexicon."""
    log.info("Reading XML lexicon")
    lexicon = {}

    context = etree.iterparse(xml, events=("start", "end"))  # "start" needed to save reference to root element
    context = iter(context)
    _event, root = next(context)

    for event, elem in context:
        if event == "end":
            if elem.tag == 'LexicalEntry':
                lemma = elem.find("Lemma")
                dalin, saldo = [], ''
                for form in lemma.findall("FormRepresentation"):
                    cat = _findval(form, "category")
                    lem = _findval(form, "lemgram")
                    if cat == "modern":
                        saldo = lem
                    else:
                        match = _findval(form, "match")
                        dalin += [(lem, match)]

                [lexicon.update({d: {'saldo': saldo, 'match': m}}) for (d, m) in dalin]

            # Done parsing section. Clear tree to save memory
            if elem.tag in ['LexicalEntry', 'frame', 'resFrame']:
                root.clear()

    testwords = ["tigerhjerta..nn.1", "lågland..nn.1", "gud..nn.1"]
    util.test_lexicon(lexicon, testwords)

    log.info("OK, read")
    return lexicon


def _findval(elems, key):
    for form in elems:
        att = form.get("att", "")
        if att == key:
            # The source text breaks off after "return" here; returning the "val"
            # attribute is an assumed completion, consistent with the callers above.
            return form.get("val")
    return ""
Defaults to Output(\"<token>:hist.diapivot\", description=\"SALDO", "saldo = [], '' for form in lemma.findall(\"FormRepresentation\"): cat =", "Model(\"hist/diapivot.pickle\"). \"\"\" lexicon = PivotLexicon(model) lemgram_annotation = list(lemgram.read()) out_annotation =", "\"category\") lem = _findval(form, \"lemgram\") if cat == \"modern\": saldo", "verbose: log.info(\"OK, read %d words\", len(self.lexicon)) def lookup(self, lem): \"\"\"Lookup", "verbose: log.info(\"Reading cross lexicon: %s\", crossfile) with open(crossfile, \"rb\") as", "old swedish SALDO lookups. It is initialized from a pickled", "picklex = {} for lem in xml_lexicon: lemgrams = []", "read %d words\", len(self.lexicon)) def lookup(self, lem): \"\"\"Lookup a word", "import xml.etree.ElementTree as etree import sparv.util as util from sparv", "memory if elem.tag in ['LexicalEntry', 'frame', 'resFrame']: root.clear() testwords =", "annotation\", language=[\"swe-1800\"]) def diapivot_annotate(out: Output = Output(\"<token>:hist.diapivot\", description=\"SALDO IDs corresponding", "elem in context: if event == \"end\": if elem.tag ==", "word in the lexicon.\"\"\" if lem.lower() == lem: annotation_tag_pairs =", "\"\"\"Create diapivot annotation.\"\"\" import logging import pickle import xml.etree.ElementTree as", "with its corresponding saldo_id according to model. Args: out (str,", "= lexicon.get_exactMatch(lemgram) if s_i: saldo_ids += [s_i] out_annotation.append(util.AFFIX + util.DELIM.join(set(saldo_ids))", "\"\"\"Annotate each lemgram with its corresponding saldo_id according to model.", "file. \"\"\" def __init__(self, crossfile, verbose=True): \"\"\"Read pickled lexicon.\"\"\" if", "for lemgram in lemgrams.split(util.DELIM): s_i = lexicon.get_exactMatch(lemgram) if s_i: saldo_ids", "xml_model.remove() ################################################################################ # Auxiliaries ################################################################################ class PivotLexicon: \"\"\"A lexicon for", "model. Args: out (str, optional): Resulting annotation file. Defaults to", "\"start\" needed to save reference to root element context =", "version of crosslinked lexicon.\"\"\" log.info(\"Reading XML lexicon\") lexicon = {}", "file.\"\"\" # Download diapivot.xml xml_model = Model(\"hist/diapivot.xml\") xml_model.download(\"https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml\") # Create", "= Model(\"hist/diapivot.pickle\")): \"\"\"Annotate each lemgram with its corresponding saldo_id according", "'match': m}}) for (d, m) in dalin] # Done parsing", "for old swedish SALDO lookups. It is initialized from a", "lem.lower() == lem: annotation_tag_pairs = self.lexicon.get(lem, []) else: annotation_tag_pairs =", "lemgram in lemgrams.split(util.DELIM): s_i = lexicon.get_exactMatch(lemgram) if s_i: saldo_ids +=", "s_i = lexicon.get_exactMatch(lemgram) if s_i: saldo_ids += [s_i] out_annotation.append(util.AFFIX +", "Output = Output(\"<token>:hist.diapivot\", description=\"SALDO IDs corresponding to lemgrams\"), lemgram: Annotation", "to lemgrams\"). lemgram (str, optional): Existing lemgram annotation. Defaults to", "diapivot annotation.\"\"\" import logging import pickle import xml.etree.ElementTree as etree", "(str, optional): Resulting annotation file. 
Defaults to Output(\"<token>:hist.diapivot\", description=\"SALDO IDs", "= [] for lemgram in lemgrams.split(util.DELIM): s_i = lexicon.get_exactMatch(lemgram) if", "lexicon = {} context = etree.iterparse(xml, events=(\"start\", \"end\")) # \"start\"", "dictionary and save as a pickle file.\"\"\" # Download diapivot.xml", "cat = _findval(form, \"category\") lem = _findval(form, \"lemgram\") if cat", "Model(\"hist/diapivot.xml\") xml_model.download(\"https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml\") # Create pickle file xml_lexicon = read_xml(xml_model.path) log.info(\"Saving", "self.lexicon = pickle.load(F) if verbose: log.info(\"OK, read %d words\", len(self.lexicon))", "+= [(lem, match)] [lexicon.update({d: {'saldo': saldo, 'match': m}}) for (d,", "for lem in xml_lexicon: lemgrams = [] for saldo, match", "testwords = [\"tigerhjerta..nn.1\", \"lågland..nn.1\", \"gud..nn.1\"] util.test_lexicon(lexicon, testwords) log.info(\"OK, read\") return", "Defaults to Model(\"hist/diapivot.pickle\"). \"\"\" lexicon = PivotLexicon(model) lemgram_annotation = list(lemgram.read())", "Download diapivot.xml xml_model = Model(\"hist/diapivot.xml\") xml_model.download(\"https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml\") # Create pickle file", "def get_exactMatch(self, word): \"\"\"Get only exact matches from lexicon.\"\"\" s", "from a pickled file. \"\"\" def __init__(self, crossfile, verbose=True): \"\"\"Read", "lemma = elem.find(\"Lemma\") dalin, saldo = [], '' for form", "section. Clear tree to save memory if elem.tag in ['LexicalEntry',", "Output(\"<token>:hist.diapivot\", description=\"SALDO IDs corresponding to lemgrams\"). lemgram (str, optional): Existing", "%d words\", len(self.lexicon)) def lookup(self, lem): \"\"\"Lookup a word in", "saldo, 'match': m}}) for (d, m) in dalin] # Done", "\"match\") dalin += [(lem, match)] [lexicon.update({d: {'saldo': saldo, 'match': m}})", "att = form.get(\"att\", \"\") if att == key: return form.get(\"val\")", "annotator, modelbuilder log = logging.getLogger(__name__) PART_DELIM1 = \"^1\" # @annotator(\"Diapivot", "language=[\"swe\"]) def build_diapivot(out: ModelOutput = ModelOutput(\"hist/diapivot.pickle\")): \"\"\"Download diapivot XML dictionary", "################################################################################ # Auxiliaries ################################################################################ class PivotLexicon: \"\"\"A lexicon for old", "[] for lemgram in lemgrams.split(util.DELIM): s_i = lexicon.get_exactMatch(lemgram) if s_i:", "from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder log", "lemgrams in lemgram_annotation: saldo_ids = [] for lemgram in lemgrams.split(util.DELIM):", "= lem else: match = _findval(form, \"match\") dalin += [(lem,", "= iter(context) _event, root = next(context) for event, elem in", "_event, root = next(context) for event, elem in context: if", "lemgram_annotation = list(lemgram.read()) out_annotation = [] for lemgrams in lemgram_annotation:", "diapivot.xml xml_model = Model(\"hist/diapivot.xml\") xml_model.download(\"https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml\") # Create pickle file xml_lexicon", "'resFrame']: root.clear() testwords = [\"tigerhjerta..nn.1\", \"lågland..nn.1\", \"gud..nn.1\"] util.test_lexicon(lexicon, testwords) log.info(\"OK,", "read\") return lexicon def _findval(elems, key): for form in elems:", "s[0] == \"exactMatch\": return s[1] def _split_val(key_val): return 
key_val.rsplit(PART_DELIM1)[1] def", "= self.lookup(word) if s and s[0] == \"exactMatch\": return s[1]", "+ util.DELIM.join(set(saldo_ids)) + util.AFFIX if saldo_ids else util.AFFIX) out.write(out_annotation) #", "elem.find(\"Lemma\") dalin, saldo = [], '' for form in lemma.findall(\"FormRepresentation\"):", "saldo = lem else: match = _findval(form, \"match\") dalin +=", "IDs corresponding to lemgrams\"). lemgram (str, optional): Existing lemgram annotation.", "to save memory if elem.tag in ['LexicalEntry', 'frame', 'resFrame']: root.clear()", "corresponding saldo_id according to model. Args: out (str, optional): Resulting", "[], '' for form in lemma.findall(\"FormRepresentation\"): cat = _findval(form, \"category\")", "= {} for lem in xml_lexicon: lemgrams = [] for", "[] for saldo, match in list(xml_lexicon[lem].items()): lemgrams.append(PART_DELIM1.join([saldo, match])) picklex[lem] =", "s and s[0] == \"exactMatch\": return s[1] def _split_val(key_val): return", "= Annotation(\"<token>:saldo.lemgram\"), model: Model = Model(\"hist/diapivot.pickle\")): \"\"\"Annotate each lemgram with", "util.AFFIX if saldo_ids else util.AFFIX) out.write(out_annotation) # @modelbuilder(\"Diapivot model\", language=[\"swe\"])", "in Pickle format\") picklex = {} for lem in xml_lexicon:", "to save reference to root element context = iter(context) _event,", "= etree.iterparse(xml, events=(\"start\", \"end\")) # \"start\" needed to save reference", "language=[\"swe-1800\"]) def diapivot_annotate(out: Output = Output(\"<token>:hist.diapivot\", description=\"SALDO IDs corresponding to", "dalin] # Done parsing section. Clear tree to save memory", "save as a pickle file.\"\"\" # Download diapivot.xml xml_model =", "description=\"SALDO IDs corresponding to lemgrams\"). lemgram (str, optional): Existing lemgram", "file xml_lexicon = read_xml(xml_model.path) log.info(\"Saving cross lexicon in Pickle format\")", "saldo, match in list(xml_lexicon[lem].items()): lemgrams.append(PART_DELIM1.join([saldo, match])) picklex[lem] = sorted(lemgrams) out.write_pickle(picklex)", "event, elem in context: if event == \"end\": if elem.tag", "if elem.tag in ['LexicalEntry', 'frame', 'resFrame']: root.clear() testwords = [\"tigerhjerta..nn.1\",", "initialized from a pickled file. \"\"\" def __init__(self, crossfile, verbose=True):", "saldo_id according to model. Args: out (str, optional): Resulting annotation", "lemgram annotation. Defaults to Annotation(\"<token>:saldo.lemgram\"). model (str, optional): Crosslink model.", "word): \"\"\"Get only exact matches from lexicon.\"\"\" s = self.lookup(word)", "{'saldo': saldo, 'match': m}}) for (d, m) in dalin] #", "else util.AFFIX) out.write(out_annotation) # @modelbuilder(\"Diapivot model\", language=[\"swe\"]) def build_diapivot(out: ModelOutput", "match])) picklex[lem] = sorted(lemgrams) out.write_pickle(picklex) # Clean up xml_model.remove() ################################################################################", "lexicon\") lexicon = {} context = etree.iterparse(xml, events=(\"start\", \"end\")) #", "def diapivot_annotate(out: Output = Output(\"<token>:hist.diapivot\", description=\"SALDO IDs corresponding to lemgrams\"),", "# Done parsing section. 
Clear tree to save memory if", "Pickle format\") picklex = {} for lem in xml_lexicon: lemgrams", "m}}) for (d, m) in dalin] # Done parsing section.", "lexicon: %s\", crossfile) with open(crossfile, \"rb\") as F: self.lexicon =", "form.get(\"att\", \"\") if att == key: return form.get(\"val\") return \"\"", "pickle import xml.etree.ElementTree as etree import sparv.util as util from", "PivotLexicon(model) lemgram_annotation = list(lemgram.read()) out_annotation = [] for lemgrams in", "Done parsing section. Clear tree to save memory if elem.tag", "lem else: match = _findval(form, \"match\") dalin += [(lem, match)]", "\"rb\") as F: self.lexicon = pickle.load(F) if verbose: log.info(\"OK, read", "logging import pickle import xml.etree.ElementTree as etree import sparv.util as", "to root element context = iter(context) _event, root = next(context)", "lem = _findval(form, \"lemgram\") if cat == \"modern\": saldo =", "log.info(\"Reading cross lexicon: %s\", crossfile) with open(crossfile, \"rb\") as F:", "crosslinked lexicon.\"\"\" log.info(\"Reading XML lexicon\") lexicon = {} context =", "util.test_lexicon(lexicon, testwords) log.info(\"OK, read\") return lexicon def _findval(elems, key): for", "format\") picklex = {} for lem in xml_lexicon: lemgrams =", "as util from sparv import Annotation, Model, ModelOutput, Output, annotator,", "# @modelbuilder(\"Diapivot model\", language=[\"swe\"]) def build_diapivot(out: ModelOutput = ModelOutput(\"hist/diapivot.pickle\")): \"\"\"Download", "\"\"\"A lexicon for old swedish SALDO lookups. It is initialized", "= form.get(\"att\", \"\") if att == key: return form.get(\"val\") return", "F: self.lexicon = pickle.load(F) if verbose: log.info(\"OK, read %d words\",", "pickle file xml_lexicon = read_xml(xml_model.path) log.info(\"Saving cross lexicon in Pickle", "to Output(\"<token>:hist.diapivot\", description=\"SALDO IDs corresponding to lemgrams\"). lemgram (str, optional):", "xml_lexicon: lemgrams = [] for saldo, match in list(xml_lexicon[lem].items()): lemgrams.append(PART_DELIM1.join([saldo,", "= [] for saldo, match in list(xml_lexicon[lem].items()): lemgrams.append(PART_DELIM1.join([saldo, match])) picklex[lem]", "# Clean up xml_model.remove() ################################################################################ # Auxiliaries ################################################################################ class PivotLexicon:", "else: match = _findval(form, \"match\") dalin += [(lem, match)] [lexicon.update({d:", "form in elems: att = form.get(\"att\", \"\") if att ==", "return list(map(_split_val, annotation_tag_pairs)) def get_exactMatch(self, word): \"\"\"Get only exact matches", "Defaults to Annotation(\"<token>:saldo.lemgram\"). model (str, optional): Crosslink model. Defaults to", "= ModelOutput(\"hist/diapivot.pickle\")): \"\"\"Download diapivot XML dictionary and save as a", "in list(xml_lexicon[lem].items()): lemgrams.append(PART_DELIM1.join([saldo, match])) picklex[lem] = sorted(lemgrams) out.write_pickle(picklex) # Clean", "model (str, optional): Crosslink model. Defaults to Model(\"hist/diapivot.pickle\"). \"\"\" lexicon", "return s[1] def _split_val(key_val): return key_val.rsplit(PART_DELIM1)[1] def read_xml(xml): \"\"\"Read the" ]
[ "= os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path))) ) return os.path.join(root_path, \"xbot\") def get_config_path(): config_path", "\"xbot\") def get_config_path(): config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\")) return config_path def", "def get_config_path(): config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\")) return config_path def get_data_path():", "current_path = os.path.abspath(os.path.dirname(__file__)) root_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path))) ) return os.path.join(root_path,", "root_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path))) ) return os.path.join(root_path, \"xbot\") def get_config_path():", "def get_data_path(): data_path = os.path.abspath( os.path.join(os.path.dirname(__file__), \"../../../data/\") ) return data_path", "config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\")) return config_path def get_data_path(): data_path =", "os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\")) return config_path def get_data_path(): data_path = os.path.abspath( os.path.join(os.path.dirname(__file__),", "get_root_path(): current_path = os.path.abspath(os.path.dirname(__file__)) root_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path))) ) return", "os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path))) ) return os.path.join(root_path, \"xbot\") def get_config_path(): config_path =", "get_config_path(): config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\")) return config_path def get_data_path(): data_path", "return config_path def get_data_path(): data_path = os.path.abspath( os.path.join(os.path.dirname(__file__), \"../../../data/\") )", "config_path def get_data_path(): data_path = os.path.abspath( os.path.join(os.path.dirname(__file__), \"../../../data/\") ) return", "= os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\")) return config_path def get_data_path(): data_path = os.path.abspath(", "os.path.abspath(os.path.dirname(__file__)) root_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path))) ) return os.path.join(root_path, \"xbot\") def", "\"../config\")) return config_path def get_data_path(): data_path = os.path.abspath( os.path.join(os.path.dirname(__file__), \"../../../data/\")", "def get_root_path(): current_path = os.path.abspath(os.path.dirname(__file__)) root_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path))) )", "os.path.dirname(os.path.dirname(os.path.dirname(current_path))) ) return os.path.join(root_path, \"xbot\") def get_config_path(): config_path = os.path.abspath(os.path.join(os.path.dirname(__file__),", "os def get_root_path(): current_path = os.path.abspath(os.path.dirname(__file__)) root_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path)))", "import os def get_root_path(): current_path = os.path.abspath(os.path.dirname(__file__)) root_path = os.path.dirname(", "return os.path.join(root_path, \"xbot\") def get_config_path(): config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\")) return", ") return os.path.join(root_path, \"xbot\") def get_config_path(): config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\"))", "= 
os.path.abspath(os.path.dirname(__file__)) root_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(current_path))) ) return os.path.join(root_path, \"xbot\")", "os.path.join(root_path, \"xbot\") def get_config_path(): config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../config\")) return config_path" ]
[ "import format_html from wagtail.wagtailcore import hooks @hooks.register('insert_editor_js') def enable_source(): return", "def enable_source(): return format_html( \"\"\" <script> registerHalloPlugin('hallohtml'); </script> \"\"\" )", "format_html from wagtail.wagtailcore import hooks @hooks.register('insert_editor_js') def enable_source(): return format_html(", "from wagtail.wagtailcore import hooks @hooks.register('insert_editor_js') def enable_source(): return format_html( \"\"\"", "from django.utils.html import format_html from wagtail.wagtailcore import hooks @hooks.register('insert_editor_js') def", "wagtail.wagtailcore import hooks @hooks.register('insert_editor_js') def enable_source(): return format_html( \"\"\" <script>", "django.utils.html import format_html from wagtail.wagtailcore import hooks @hooks.register('insert_editor_js') def enable_source():", "@hooks.register('insert_editor_js') def enable_source(): return format_html( \"\"\" <script> registerHalloPlugin('hallohtml'); </script> \"\"\"", "hooks @hooks.register('insert_editor_js') def enable_source(): return format_html( \"\"\" <script> registerHalloPlugin('hallohtml'); </script>", "import hooks @hooks.register('insert_editor_js') def enable_source(): return format_html( \"\"\" <script> registerHalloPlugin('hallohtml');" ]
[ "api_url = \"{}/\".format(QL_URL) r = requests.get('{}'.format(api_url)) assert r.status_code == 200,", "r.status_code == 200, r.text assert r.json() == { \"notify_url\": \"/v2/notify\",", "test_api(): api_url = \"{}/\".format(QL_URL) r = requests.get('{}'.format(api_url)) assert r.status_code ==", "QL_URL import requests def test_api(): api_url = \"{}/\".format(QL_URL) r =", "conftest import QL_URL import requests def test_api(): api_url = \"{}/\".format(QL_URL)", "\"{}/\".format(QL_URL) r = requests.get('{}'.format(api_url)) assert r.status_code == 200, r.text assert", "\"notify_url\": \"/v2/notify\", \"subscriptions_url\": \"/v2/subscriptions\", \"entities_url\": \"/v2/entities\", \"types_url\": \"/v2/types\", \"attributes_url\": \"/v2/attrs\"", "= requests.get('{}'.format(api_url)) assert r.status_code == 200, r.text assert r.json() ==", "assert r.status_code == 200, r.text assert r.json() == { \"notify_url\":", "r = requests.get('{}'.format(api_url)) assert r.status_code == 200, r.text assert r.json()", "200, r.text assert r.json() == { \"notify_url\": \"/v2/notify\", \"subscriptions_url\": \"/v2/subscriptions\",", "== { \"notify_url\": \"/v2/notify\", \"subscriptions_url\": \"/v2/subscriptions\", \"entities_url\": \"/v2/entities\", \"types_url\": \"/v2/types\",", "{ \"notify_url\": \"/v2/notify\", \"subscriptions_url\": \"/v2/subscriptions\", \"entities_url\": \"/v2/entities\", \"types_url\": \"/v2/types\", \"attributes_url\":", "import requests def test_api(): api_url = \"{}/\".format(QL_URL) r = requests.get('{}'.format(api_url))", "import QL_URL import requests def test_api(): api_url = \"{}/\".format(QL_URL) r", "requests def test_api(): api_url = \"{}/\".format(QL_URL) r = requests.get('{}'.format(api_url)) assert", "= \"{}/\".format(QL_URL) r = requests.get('{}'.format(api_url)) assert r.status_code == 200, r.text", "r.text assert r.json() == { \"notify_url\": \"/v2/notify\", \"subscriptions_url\": \"/v2/subscriptions\", \"entities_url\":", "== 200, r.text assert r.json() == { \"notify_url\": \"/v2/notify\", \"subscriptions_url\":", "from conftest import QL_URL import requests def test_api(): api_url =", "assert r.json() == { \"notify_url\": \"/v2/notify\", \"subscriptions_url\": \"/v2/subscriptions\", \"entities_url\": \"/v2/entities\",", "requests.get('{}'.format(api_url)) assert r.status_code == 200, r.text assert r.json() == {", "\"/v2/notify\", \"subscriptions_url\": \"/v2/subscriptions\", \"entities_url\": \"/v2/entities\", \"types_url\": \"/v2/types\", \"attributes_url\": \"/v2/attrs\" }", "def test_api(): api_url = \"{}/\".format(QL_URL) r = requests.get('{}'.format(api_url)) assert r.status_code", "r.json() == { \"notify_url\": \"/v2/notify\", \"subscriptions_url\": \"/v2/subscriptions\", \"entities_url\": \"/v2/entities\", \"types_url\":" ]
[ "the worker. I return False if the file with the", "if cmd.didFail(): return False s = cmd.updates[\"stat\"][-1] filemode = s[stat.ST_MODE]", "Use me with doStepIf to make a build step conditional", "that is a directory. Use me with doStepIf to make", "file with the given name exists or that is a", "file does not exist or that is a directory. Use", "to nonexistence of some file. For example doStepIf=FileDoesNotExist('build/configure') \"\"\" def", "I return True if the file with the given name", "class FileDoesNotExist(object): \"\"\"I check a file existence on the worker.", "FileExists(object): \"\"\"I check a file existence on the worker. I", "nonexistence of some file. For example doStepIf=FileDoesNotExist('build/configure') \"\"\" def __init__(self,", "given name exists or that is a directory, True if", "True only if this is a file or a link", "if any filesystem object with the given name exists. return", "this is a file or a link and not any", "d.addCallback(lambda res: self.commandComplete(cmd)) return d def commandComplete(self, cmd): # False", "d.addCallback(lambda res: self.commandComplete(cmd)) return d def commandComplete(self, cmd): if cmd.didFail():", "step conditional to existence of some file. For example doStepIf=FileExists('build/configure')", "res: self.commandComplete(cmd)) return d def commandComplete(self, cmd): if cmd.didFail(): return", "system object. return True else: return False class FileDoesNotExist(object): \"\"\"I", "a file existence on the worker. I return True if", "return False class FileDoesNotExist(object): \"\"\"I check a file existence on", "name exists or that is a directory, True if the", "d def commandComplete(self, cmd): # False if any filesystem object", "filename def __call__(self, step): step.checkWorkerHasCommand('stat') cmd = RemoteCommand('stat', {'file': self.filename})", "make a build step conditional to existence of some file.", "cmd = RemoteCommand('stat', {'file': self.filename}) d = step.runCommand(cmd) d.addCallback(lambda res:", "# False if any filesystem object with the given name", "a build step conditional to nonexistence of some file. For", "__call__(self, step): step.checkWorkerHasCommand('stat') cmd = RemoteCommand('stat', {'file': self.filename}) d =", "directory. Use me with doStepIf to make a build step", "conditional to existence of some file. For example doStepIf=FileExists('build/configure') \"\"\"", "a directory. Use me with doStepIf to make a build", "file with the given name exists, False if the file", "me with doStepIf to make a build step conditional to", "if the file with the given name exists or that", "to existence of some file. For example doStepIf=FileExists('build/configure') \"\"\" def", "link and not any other file # system object. return", "make a build step conditional to nonexistence of some file.", "self.filename = filename def __call__(self, step): step.checkWorkerHasCommand('stat') cmd = RemoteCommand('stat',", "does not exist or that is a directory. Use me", "if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode): # True only if this is", "# True only if this is a file or a", "step): step.checkWorkerHasCommand('stat') cmd = RemoteCommand('stat', {'file': self.filename}) d = step.runCommand(cmd)", "step conditional to nonexistence of some file. 
For example doStepIf=FileDoesNotExist('build/configure')", "doStepIf to make a build step conditional to existence of", "or that is a directory, True if the file does", "False if the file with the given name exists or", "def commandComplete(self, cmd): if cmd.didFail(): return False s = cmd.updates[\"stat\"][-1]", "import RemoteCommand from buildbot.interfaces import WorkerTooOldError import stat class FileExists(object):", "\"\"\" def __init__(self, filename): self.filename = filename def __call__(self, step):", "not exist or that is a directory. Use me with", "return True if the file with the given name exists,", "file does not exist. Use me with doStepIf to make", "if the file with the given name exists, False if", "def __init__(self, filename): self.filename = filename def __call__(self, step): step.checkWorkerHasCommand('stat')", "not exist. Use me with doStepIf to make a build", "a link and not any other file # system object.", "is a directory, True if the file does not exist.", "and not any other file # system object. return True", "check a file existence on the worker. I return False", "exist or that is a directory. Use me with doStepIf", "stat.S_ISLNK(filemode): # True only if this is a file or", "cmd): # False if any filesystem object with the given", "RemoteCommand from buildbot.interfaces import WorkerTooOldError import stat class FileExists(object): \"\"\"I", "a build step conditional to existence of some file. For", "cmd.didFail(): return False s = cmd.updates[\"stat\"][-1] filemode = s[stat.ST_MODE] if", "cmd.updates[\"stat\"][-1] filemode = s[stat.ST_MODE] if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode): # True", "or that is a directory. Use me with doStepIf to", "return d def commandComplete(self, cmd): if cmd.didFail(): return False s", "example doStepIf=FileDoesNotExist('build/configure') \"\"\" def __init__(self, filename): self.filename = filename def", "commandComplete(self, cmd): if cmd.didFail(): return False s = cmd.updates[\"stat\"][-1] filemode", "the file with the given name exists, False if the", "True else: return False class FileDoesNotExist(object): \"\"\"I check a file", "is a file or a link and not any other", "from buildbot.process.remotecommand import RemoteCommand from buildbot.interfaces import WorkerTooOldError import stat", "doStepIf=FileExists('build/configure') \"\"\" def __init__(self, filename): self.filename = filename def __call__(self,", "For example doStepIf=FileDoesNotExist('build/configure') \"\"\" def __init__(self, filename): self.filename = filename", "doStepIf to make a build step conditional to nonexistence of", "WorkerTooOldError import stat class FileExists(object): \"\"\"I check a file existence", "to make a build step conditional to existence of some", "object. return True else: return False class FileDoesNotExist(object): \"\"\"I check", "RemoteCommand('stat', {'file': self.filename}) d = step.runCommand(cmd) d.addCallback(lambda res: self.commandComplete(cmd)) return", "res: self.commandComplete(cmd)) return d def commandComplete(self, cmd): # False if", "the file with the given name exists or that is", "the file does not exist. 
Use me with doStepIf to", "= step.runCommand(cmd) d.addCallback(lambda res: self.commandComplete(cmd)) return d def commandComplete(self, cmd):", "s = cmd.updates[\"stat\"][-1] filemode = s[stat.ST_MODE] if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode):", "doStepIf=FileDoesNotExist('build/configure') \"\"\" def __init__(self, filename): self.filename = filename def __call__(self,", "from buildbot.interfaces import WorkerTooOldError import stat class FileExists(object): \"\"\"I check", "worker. I return True if the file with the given", "on the worker. I return False if the file with", "self.commandComplete(cmd)) return d def commandComplete(self, cmd): # False if any", "filename): self.filename = filename def __call__(self, step): step.checkWorkerHasCommand('stat') cmd =", "= RemoteCommand('stat', {'file': self.filename}) d = step.runCommand(cmd) d.addCallback(lambda res: self.commandComplete(cmd))", "with the given name exists or that is a directory,", "a file or a link and not any other file", "= s[stat.ST_MODE] if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode): # True only if", "some file. For example doStepIf=FileDoesNotExist('build/configure') \"\"\" def __init__(self, filename): self.filename", "False if the file does not exist or that is", "{'file': self.filename}) d = step.runCommand(cmd) d.addCallback(lambda res: self.commandComplete(cmd)) return d", "file existence on the worker. I return False if the", "existence on the worker. I return True if the file", "build step conditional to nonexistence of some file. For example", "if the file does not exist or that is a", "def commandComplete(self, cmd): # False if any filesystem object with", "of some file. For example doStepIf=FileDoesNotExist('build/configure') \"\"\" def __init__(self, filename):", "or stat.S_ISLNK(filemode): # True only if this is a file", "or a link and not any other file # system", "__init__(self, filename): self.filename = filename def __call__(self, step): step.checkWorkerHasCommand('stat') cmd", "the given name exists, False if the file does not", "I return False if the file with the given name", "example doStepIf=FileExists('build/configure') \"\"\" def __init__(self, filename): self.filename = filename def", "name exists, False if the file does not exist or", "False if any filesystem object with the given name exists.", "stat.S_ISREG(filemode) or stat.S_ISLNK(filemode): # True only if this is a", "d = step.runCommand(cmd) d.addCallback(lambda res: self.commandComplete(cmd)) return d def commandComplete(self,", "not any other file # system object. return True else:", "check a file existence on the worker. I return True", "is a directory. Use me with doStepIf to make a", "some file. For example doStepIf=FileExists('build/configure') \"\"\" def __init__(self, filename): self.filename", "other file # system object. return True else: return False", "stat class FileExists(object): \"\"\"I check a file existence on the", "the given name exists or that is a directory, True", "commandComplete(self, cmd): # False if any filesystem object with the", "that is a directory, True if the file does not", "any filesystem object with the given name exists. return cmd.didFail()", "if the file does not exist. Use me with doStepIf", "s[stat.ST_MODE] if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode): # True only if this", "= filename def __call__(self, step): step.checkWorkerHasCommand('stat') cmd = RemoteCommand('stat', {'file':", "to make a build step conditional to nonexistence of some", "the worker. 
I return True if the file with the", "False s = cmd.updates[\"stat\"][-1] filemode = s[stat.ST_MODE] if stat.S_ISREG(filemode) or", "buildbot.process.remotecommand import RemoteCommand from buildbot.interfaces import WorkerTooOldError import stat class", "on the worker. I return True if the file with", "= cmd.updates[\"stat\"][-1] filemode = s[stat.ST_MODE] if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode): #", "class FileExists(object): \"\"\"I check a file existence on the worker.", "file. For example doStepIf=FileExists('build/configure') \"\"\" def __init__(self, filename): self.filename =", "return False s = cmd.updates[\"stat\"][-1] filemode = s[stat.ST_MODE] if stat.S_ISREG(filemode)", "a directory, True if the file does not exist. Use", "filemode = s[stat.ST_MODE] if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode): # True only", "self.commandComplete(cmd)) return d def commandComplete(self, cmd): if cmd.didFail(): return False", "buildbot.interfaces import WorkerTooOldError import stat class FileExists(object): \"\"\"I check a", "True if the file with the given name exists, False", "step.runCommand(cmd) d.addCallback(lambda res: self.commandComplete(cmd)) return d def commandComplete(self, cmd): if", "with the given name exists, False if the file does", "import stat class FileExists(object): \"\"\"I check a file existence on", "with doStepIf to make a build step conditional to nonexistence", "def __call__(self, step): step.checkWorkerHasCommand('stat') cmd = RemoteCommand('stat', {'file': self.filename}) d", "conditional to nonexistence of some file. For example doStepIf=FileDoesNotExist('build/configure') \"\"\"", "cmd): if cmd.didFail(): return False s = cmd.updates[\"stat\"][-1] filemode =", "return True else: return False class FileDoesNotExist(object): \"\"\"I check a", "directory, True if the file does not exist. Use me", "existence on the worker. I return False if the file", "False class FileDoesNotExist(object): \"\"\"I check a file existence on the", "the file does not exist or that is a directory.", "only if this is a file or a link and", "exists, False if the file does not exist or that", "file existence on the worker. I return True if the", "d def commandComplete(self, cmd): if cmd.didFail(): return False s =", "if this is a file or a link and not", "FileDoesNotExist(object): \"\"\"I check a file existence on the worker. I", "exists or that is a directory, True if the file", "given name exists, False if the file does not exist", "exist. Use me with doStepIf to make a build step", "For example doStepIf=FileExists('build/configure') \"\"\" def __init__(self, filename): self.filename = filename", "of some file. For example doStepIf=FileExists('build/configure') \"\"\" def __init__(self, filename):", "any other file # system object. return True else: return", "else: return False class FileDoesNotExist(object): \"\"\"I check a file existence", "worker. I return False if the file with the given", "import WorkerTooOldError import stat class FileExists(object): \"\"\"I check a file", "True if the file does not exist. Use me with", "# system object. return True else: return False class FileDoesNotExist(object):", "existence of some file. For example doStepIf=FileExists('build/configure') \"\"\" def __init__(self,", "step.checkWorkerHasCommand('stat') cmd = RemoteCommand('stat', {'file': self.filename}) d = step.runCommand(cmd) d.addCallback(lambda", "file # system object. return True else: return False class", "a file existence on the worker. 
I return False if", "file or a link and not any other file #", "return False if the file with the given name exists", "step.runCommand(cmd) d.addCallback(lambda res: self.commandComplete(cmd)) return d def commandComplete(self, cmd): #", "does not exist. Use me with doStepIf to make a", "file. For example doStepIf=FileDoesNotExist('build/configure') \"\"\" def __init__(self, filename): self.filename =", "\"\"\"I check a file existence on the worker. I return", "build step conditional to existence of some file. For example", "with doStepIf to make a build step conditional to existence", "self.filename}) d = step.runCommand(cmd) d.addCallback(lambda res: self.commandComplete(cmd)) return d def", "return d def commandComplete(self, cmd): # False if any filesystem" ]
[ "= Entity(blue_decision_maker) env.red_player = Entity(red_decision_maker) print_start_of_game_info(blue_decision_maker, red_decision_maker) NUM_OF_EPISODES = env.NUMBER_OF_EPISODES", "the action! current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Red) is not WinEnum.NoWin) if current_episode.is_terminal:", "% EVALUATE_PLAYERS_EVERY == 0: a = episode_number % EVALUATE_PLAYERS_EVERY if", "(env.compute_terminal(whos_turn=Color.Red) is not WinEnum.NoWin) if current_episode.is_terminal: # Blue won the", "steps_current_game) current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin) if current_episode.is_terminal:# Blue", "import matplotlib.pyplot as plt plt.matshow(observation_for_blue_s0.img) plt.show() plt.matshow(observation_for_blue_s1.img) plt.show() if __name__", "print(\"Red player starting tournament with trained model: \" , red_decision_maker.path_model_to_load)", "def evaluate(episode_number): #if episode_number % EVALUATE_PLAYERS_EVERY == 0: a =", "= episode_number % EVALUATE_PLAYERS_EVERY if a>=0 and a<EVALUATE_BATCH_SIZE: EVALUATE =", "game! blue_won_the_game=True else: ##### Red's turn! ##### observation_for_red_s0: State =", "a<EVALUATE_BATCH_SIZE: EVALUATE = True else: EVALUATE = False return EVALUATE", "we reached MAX_STEPS_PER_EPISODE current_episode.is_terminal = True if blue_won_the_game or red_won_the_game:", "env.end_run() if blue_decision_maker.type() == AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic: blue_decision_maker._decision_maker.print_model(observation_for_blue_s0,", "starting with no model\") else: print(\"Red player starting tournament with", "we exited the loop because we reached MAX_STEPS_PER_EPISODE current_episode.is_terminal =", "model: \" , blue_decision_maker.path_model_to_load) print(\"Red player type: \", Agent_type_str[red_decision_maker.type()]) if", "MAX_STEPS_PER_EPISODE + 1): ##### Blue's turn! ##### observation_for_blue_s0: State =", "current_episode.print_episode(env, steps_current_game) current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin) if current_episode.is_terminal:#", "current_episode.is_terminal: # Blue won the game! red_won_the_game = True current_episode.print_episode(env,", "if __name__ == '__main__': env = Environment(IS_TRAINING) print(\"Starting Blue player\")", "red player\") ### Red Decision Maker red_decision_maker = Greedy_player.Greedy_player() env.blue_player", "evaluate(episode) current_episode = Episode(episode, EVALUATE, show_always=False if IS_TRAINING else True)", "State = env.get_observation_for_blue() current_episode.print_episode(env, steps_current_game) action_blue: AgentAction = blue_decision_maker.get_action(observation_for_blue_s0, EVALUATE)", "True else: EVALUATE = False return EVALUATE def print_states(observation_for_blue_s0, observation_for_blue_s1):", "import Environment, Episode from gym_combat.envs.Common.constants import * from gym_combat.envs.Qtable import", "plt.show() plt.matshow(observation_for_blue_s1.img) plt.show() if __name__ == '__main__': env = Environment(IS_TRAINING)", "player\") blue_decision_maker = DQNAgent_keras.DQNAgent_keras() #blue_decision_maker = DQNAgent_keras.DQNAgent_keras(UPDATE_CONTEXT=True, path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_", "observation observation_for_blue_s0: State = env.get_observation_for_blue() action_blue = -1 # initialize", "Blue won the game! 
red_won_the_game = True current_episode.print_episode(env, steps_current_game) reward_step_blue,", "plt.matshow(observation_for_blue_s1.img) plt.show() if __name__ == '__main__': env = Environment(IS_TRAINING) print(\"Starting", "game! red_won_the_game = True current_episode.print_episode(env, steps_current_game) reward_step_blue, reward_step_red = env.handle_reward(steps_current_game)", "the game! blue_won_the_game=True else: ##### Red's turn! ##### observation_for_red_s0: State", ", blue_decision_maker.path_model_to_load) print(\"Red player type: \", Agent_type_str[red_decision_maker.type()]) if red_decision_maker.path_model_to_load==None: print(\"Red", "== MAX_STEPS_PER_EPISODE: # if we exited the loop because we", "% SAVE_STATS_EVERY == 0: if False:#blue_decision_maker.type()== AgentType.DQN_keras or blue_decision_maker.type() ==", "= evaluate(episode) current_episode = Episode(episode, EVALUATE, show_always=False if IS_TRAINING else", "action_red) # take the action! current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Red) is not", "0.95max_ -0.04avg_ -3.10min__1620558885.model') print(\"Starting red player\") ### Red Decision Maker", "tqdm import tqdm style.use(\"ggplot\") from gym_combat.envs.Arena.CState import State from gym_combat.envs.Arena.Entity", "no model\") else: print(\"Blue player starting tournament with trained model:", "print(\"Red player starting with no model\") else: print(\"Red player starting", "+ 1), ascii=True, unit='episodes'): EVALUATE = evaluate(episode) current_episode = Episode(episode,", "gym_combat.envs.Qtable import Qtable_DecisionMaker from gym_combat.envs.DQN import DQNAgent_keras from gym_combat.envs.Greedy import", "Episode(episode, EVALUATE, show_always=False if IS_TRAINING else True) # set new", "env.get_observation_for_red() action_red: AgentAction = red_decision_maker.get_action(observation_for_red_s0, EVALUATE) env.take_action(Color.Red, action_red) # take", "get observation observation_for_blue_s0: State = env.get_observation_for_blue() action_blue = -1 #", "steps_current_game in range(1, MAX_STEPS_PER_EPISODE + 1): ##### Blue's turn! #####", "range(1, MAX_STEPS_PER_EPISODE + 1): ##### Blue's turn! ##### observation_for_blue_s0: State", "red_won_the_game = True current_episode.print_episode(env, steps_current_game) reward_step_blue, reward_step_red = env.handle_reward(steps_current_game) current_episode.episode_reward_red", "current_episode.print_episode(env, steps_current_game) reward_step_blue, reward_step_red = env.handle_reward(steps_current_game) current_episode.episode_reward_red += reward_step_red current_episode.episode_reward_blue", "observation_for_red_s0: State = env.get_observation_for_red() action_red: AgentAction = red_decision_maker.get_action(observation_for_red_s0, EVALUATE) env.take_action(Color.Red,", "EVALUATE_PLAYERS_EVERY == 0: a = episode_number % EVALUATE_PLAYERS_EVERY if a>=0", "False:#blue_decision_maker.type()== AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic: blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, \"conv\")#env.save_folder_path) #", "False red_won_the_game = False for steps_current_game in range(1, MAX_STEPS_PER_EPISODE +", "action! 
current_episode.print_episode(env, steps_current_game) current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin) if", "env.NUMBER_OF_EPISODES for episode in tqdm(range(1, NUM_OF_EPISODES + 1), ascii=True, unit='episodes'):", "NUM_OF_EPISODES + 1), ascii=True, unit='episodes'): EVALUATE = evaluate(episode) current_episode =", "env.get_observation_for_blue() action_blue = -1 # initialize the decision_makers for the", "plt plt.matshow(observation_for_blue_s0.img) plt.show() plt.matshow(observation_for_blue_s1.img) plt.show() if __name__ == '__main__': env", "not WinEnum.NoWin) if current_episode.is_terminal:# Blue won the game! blue_won_the_game=True else:", "True) # set new start position for the players env.reset_game(episode)", "the players env.reset_game(episode) # get observation observation_for_blue_s0: State = env.get_observation_for_blue()", "loop because we reached MAX_STEPS_PER_EPISODE current_episode.is_terminal = True if blue_won_the_game", "env.handle_reward(steps_current_game) current_episode.episode_reward_red += reward_step_red current_episode.episode_reward_blue += reward_step_blue observation_for_blue_s1: State =", "current_episode.is_terminal, EVALUATE) if steps_current_game == MAX_STEPS_PER_EPISODE: # if we exited", "evaluate(episode_number): #if episode_number % EVALUATE_PLAYERS_EVERY == 0: a = episode_number", "reward_step_blue, reward_step_red = env.handle_reward(steps_current_game) current_episode.episode_reward_red += reward_step_red current_episode.episode_reward_blue += reward_step_blue", "from gym_combat.envs.DQN import DQNAgent_keras from gym_combat.envs.Greedy import Greedy_player import matplotlib.pyplot", "plt.show() if __name__ == '__main__': env = Environment(IS_TRAINING) print(\"Starting Blue", "#blue_decision_maker = DQNAgent_keras.DQNAgent_keras(UPDATE_CONTEXT=True, path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_ -3.10min__1620558885.model') print(\"Starting red player\")", "return EVALUATE def print_states(observation_for_blue_s0, observation_for_blue_s1): import matplotlib.pyplot as plt plt.matshow(observation_for_blue_s0.img)", "MAX_STEPS_PER_EPISODE current_episode.is_terminal = True if blue_won_the_game or red_won_the_game: break #", "position for the players env.reset_game(episode) # get observation observation_for_blue_s0: State", "because we reached MAX_STEPS_PER_EPISODE current_episode.is_terminal = True if blue_won_the_game or", "##### Red's turn! ##### observation_for_red_s0: State = env.get_observation_for_red() action_red: AgentAction", "== 0: a = episode_number % EVALUATE_PLAYERS_EVERY if a>=0 and", "current_episode.episode_reward_blue += reward_step_blue observation_for_blue_s1: State = env.get_observation_for_blue() blue_decision_maker.update_context(observation_for_blue_s0, action_blue, reward_step_blue,", "State from gym_combat.envs.Arena.Entity import Entity from gym_combat.envs.Arena.Environment import Environment, Episode", "if red_decision_maker.path_model_to_load==None: print(\"Red player starting with no model\") else: print(\"Red", "gym_combat.envs.Greedy import Greedy_player import matplotlib.pyplot as plt def print_start_of_game_info(blue_decision_maker, red_decision_maker):", "##### Blue's turn! ##### observation_for_blue_s0: State = env.get_observation_for_blue() current_episode.print_episode(env, steps_current_game)", "turn! 
##### observation_for_blue_s0: State = env.get_observation_for_blue() current_episode.print_episode(env, steps_current_game) action_blue: AgentAction", "statistics env.update_win_counters(steps_current_game) env.data_for_statistics(current_episode.episode_reward_blue, current_episode.episode_reward_red, steps_current_game, blue_decision_maker.get_epsolon()) env.evaluate_info(EVALUATE, episode, steps_current_game, blue_decision_maker.get_epsolon())", "with no model\") else: print(\"Red player starting tournament with trained", "#if episode_number % EVALUATE_PLAYERS_EVERY == 0: a = episode_number %", "blue_won_the_game = False red_won_the_game = False for steps_current_game in range(1,", "path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_ -3.10min__1620558885.model') print(\"Starting red player\") ### Red Decision", "Red Decision Maker red_decision_maker = Greedy_player.Greedy_player() env.blue_player = Entity(blue_decision_maker) env.red_player", "print(\"Starting tournament!\") print(\"Blue player type: \", Agent_type_str[blue_decision_maker.type()]) if blue_decision_maker.path_model_to_load==None: print(\"Blue", "player starting tournament with trained model: \" , red_decision_maker.path_model_to_load) print(\"Number", "= False for steps_current_game in range(1, MAX_STEPS_PER_EPISODE + 1): #####", "style.use(\"ggplot\") from gym_combat.envs.Arena.CState import State from gym_combat.envs.Arena.Entity import Entity from", "if blue_decision_maker.path_model_to_load==None: print(\"Blue player starting with no model\") else: print(\"Blue", "DQNAgent_keras from gym_combat.envs.Greedy import Greedy_player import matplotlib.pyplot as plt def", "EVALUATE) env.take_action(Color.Blue, action_blue) # take the action! current_episode.print_episode(env, steps_current_game) current_episode.is_terminal", "player\") ### Red Decision Maker red_decision_maker = Greedy_player.Greedy_player() env.blue_player =", "players blue_decision_maker.set_initial_state(observation_for_blue_s0, episode) #red_decision_maker.set_initial_state(observation_for_red_s0, episode) # for non-greedy players blue_won_the_game", "tournament with trained model: \" , blue_decision_maker.path_model_to_load) print(\"Red player type:", "% EVALUATE_PLAYERS_EVERY if a>=0 and a<EVALUATE_BATCH_SIZE: EVALUATE = True else:", "type: \", Agent_type_str[blue_decision_maker.type()]) if blue_decision_maker.path_model_to_load==None: print(\"Blue player starting with no", "env.get_observation_for_blue() blue_decision_maker.update_context(observation_for_blue_s0, action_blue, reward_step_blue, observation_for_blue_s1, current_episode.is_terminal, EVALUATE) if steps_current_game ==", "Environment, Episode from gym_combat.envs.Common.constants import * from gym_combat.envs.Qtable import Qtable_DecisionMaker", "start position for the players env.reset_game(episode) # get observation observation_for_blue_s0:", "red_decision_maker.path_model_to_load) print(\"Number of rounds: \", NUM_OF_EPISODES) print(\"~~~ GO! ~~~\\n\\n\") def", "episode in tqdm(range(1, NUM_OF_EPISODES + 1), ascii=True, unit='episodes'): EVALUATE =", "print(\"Blue player starting with no model\") else: print(\"Blue player starting", "Entity(blue_decision_maker) env.red_player = Entity(red_decision_maker) print_start_of_game_info(blue_decision_maker, red_decision_maker) NUM_OF_EPISODES = env.NUMBER_OF_EPISODES for", "\", NUM_OF_EPISODES) print(\"~~~ GO! 
<filename>gym_combat/gym_combat/envs/main.py
from matplotlib import style
from tqdm import tqdm
style.use("ggplot")

from gym_combat.envs.Arena.CState import State
from gym_combat.envs.Arena.Entity import Entity
from gym_combat.envs.Arena.Environment import Environment, Episode
from gym_combat.envs.Common.constants import *
from gym_combat.envs.Qtable import Qtable_DecisionMaker
from gym_combat.envs.DQN import DQNAgent_keras
from gym_combat.envs.Greedy import Greedy_player
import matplotlib.pyplot as plt


def print_start_of_game_info(blue_decision_maker, red_decision_maker):
    print("Starting tournament!")
    print("Blue player type: ", Agent_type_str[blue_decision_maker.type()])
    if blue_decision_maker.path_model_to_load is None:
        print("Blue player starting with no model")
    else:
        print("Blue player starting tournament with trained model: ", blue_decision_maker.path_model_to_load)

    print("Red player type: ", Agent_type_str[red_decision_maker.type()])
    if red_decision_maker.path_model_to_load is None:
        print("Red player starting with no model")
    else:
        print("Red player starting tournament with trained model: ", red_decision_maker.path_model_to_load)

    print("Number of rounds: ", NUM_OF_EPISODES)
    print("~~~ GO! ~~~\n\n")


def evaluate(episode_number):
    # if episode_number % EVALUATE_PLAYERS_EVERY == 0:
    a = episode_number % EVALUATE_PLAYERS_EVERY
    if 0 <= a < EVALUATE_BATCH_SIZE:
        EVALUATE = True
    else:
        EVALUATE = False
    return EVALUATE


def print_states(observation_for_blue_s0, observation_for_blue_s1):
    import matplotlib.pyplot as plt
    plt.matshow(observation_for_blue_s0.img)
    plt.show()
    plt.matshow(observation_for_blue_s1.img)
    plt.show()


if __name__ == '__main__':

    env = Environment(IS_TRAINING)

    print("Starting Blue player")
    blue_decision_maker = DQNAgent_keras.DQNAgent_keras()
    # blue_decision_maker = DQNAgent_keras.DQNAgent_keras(UPDATE_CONTEXT=True, path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_ -3.10min__1620558885.model')

    print("Starting red player")
    ### Red Decision Maker
    red_decision_maker = Greedy_player.Greedy_player()

    env.blue_player = Entity(blue_decision_maker)
    env.red_player = Entity(red_decision_maker)

    print_start_of_game_info(blue_decision_maker, red_decision_maker)

    NUM_OF_EPISODES = env.NUMBER_OF_EPISODES
    for episode in tqdm(range(1, NUM_OF_EPISODES + 1), ascii=True, unit='episodes'):

        EVALUATE = evaluate(episode)
        current_episode = Episode(episode, EVALUATE, show_always=False if IS_TRAINING else True)

        # set new start position for the players
        env.reset_game(episode)

        # get observation
        observation_for_blue_s0: State = env.get_observation_for_blue()
        action_blue = -1

        # initialize the decision_makers for the players
        blue_decision_maker.set_initial_state(observation_for_blue_s0, episode)
        # red_decision_maker.set_initial_state(observation_for_red_s0, episode)  # for non-greedy players

        blue_won_the_game = False
        red_won_the_game = False

        for steps_current_game in range(1, MAX_STEPS_PER_EPISODE + 1):

            ##### Blue's turn! #####
            observation_for_blue_s0: State = env.get_observation_for_blue()
            current_episode.print_episode(env, steps_current_game)

            action_blue: AgentAction = blue_decision_maker.get_action(observation_for_blue_s0, EVALUATE)
            env.take_action(Color.Blue, action_blue)  # take the action!
            current_episode.print_episode(env, steps_current_game)

            current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin)
            if current_episode.is_terminal:  # Blue won the game!
                blue_won_the_game = True
            else:
                ##### Red's turn! #####
                observation_for_red_s0: State = env.get_observation_for_red()
                action_red: AgentAction = red_decision_maker.get_action(observation_for_red_s0, EVALUATE)
                env.take_action(Color.Red, action_red)  # take the action!

                current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Red) is not WinEnum.NoWin)
                if current_episode.is_terminal:  # Red won the game!
                    red_won_the_game = True

            current_episode.print_episode(env, steps_current_game)

            reward_step_blue, reward_step_red = env.handle_reward(steps_current_game)
            current_episode.episode_reward_red += reward_step_red
            current_episode.episode_reward_blue += reward_step_blue

            observation_for_blue_s1: State = env.get_observation_for_blue()
            blue_decision_maker.update_context(observation_for_blue_s0, action_blue, reward_step_blue,
                                               observation_for_blue_s1, current_episode.is_terminal, EVALUATE)

            if steps_current_game == MAX_STEPS_PER_EPISODE:
                # we exited the loop because we reached MAX_STEPS_PER_EPISODE
                current_episode.is_terminal = True

            if blue_won_the_game or red_won_the_game:
                break

        # for statistics
        env.update_win_counters(steps_current_game)
        env.data_for_statistics(current_episode.episode_reward_blue, current_episode.episode_reward_red,
                                steps_current_game, blue_decision_maker.get_epsolon())
        env.evaluate_info(EVALUATE, episode, steps_current_game, blue_decision_maker.get_epsolon())

        if current_episode.episode_number % SAVE_STATS_EVERY == 0:
            if False:  # blue_decision_maker.type() == AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
                blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, "conv")  # env.save_folder_path

        # print info of episode:
        current_episode.print_info_of_episode(env, steps_current_game, blue_decision_maker.get_epsolon(), episode)

    env.end_run()
    if blue_decision_maker.type() == AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
        blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, env.save_folder_path)
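A note on the evaluate() helper above: it selects which episodes run in evaluation mode by position inside a rolling window of EVALUATE_PLAYERS_EVERY episodes. The sketch below is illustration only; the window and batch sizes are hypothetical stand-ins, since the real values come from gym_combat.envs.Common.constants, and the effect of EVALUATE=True on the agent (presumably disabling training-time exploration) is inferred, not confirmed by the source.

# Illustration only -- EVALUATE_PLAYERS_EVERY and EVALUATE_BATCH_SIZE are
# hypothetical stand-ins for whatever gym_combat.envs.Common.constants defines.
EVALUATE_PLAYERS_EVERY = 1000  # length of the rolling window
EVALUATE_BATCH_SIZE = 100      # evaluation episodes at the start of each window

def evaluate(episode_number):
    # Episodes whose index modulo the window length falls in the first
    # EVALUATE_BATCH_SIZE slots are run with EVALUATE=True; the rest are
    # ordinary training episodes.
    a = episode_number % EVALUATE_PLAYERS_EVERY
    return 0 <= a < EVALUATE_BATCH_SIZE

assert evaluate(1000) is True   # first episode of a new window
assert evaluate(1099) is True   # still inside the evaluation batch
assert evaluate(1100) is False  # back to training episodes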
[ "columns which one of them a linear combination of the", "* channel_t[i]) return array(joint).T def _generate_posteriors(self): joint_t = self.joint.T.copy() outer", "hyper-distribution, i.e., remove columns that contains only zeros and merge", "import Channel from numpy import array, arange, zeros from numpy", "remove columns that contains only zeros and merge columns which", "= self.channel.matrix.T for i in arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior * channel_t[i]) return", "algorithm has time complexity of O(n*m^2) where n is the", "prior : list, numpy.ndarray Prior distribution on the set of", "distribution on the set of secrets. prior[i] is the probability", "Prior distribution on the set of secrets. prior[i] is the", "self.outer = npdelete(self.outer, delete_inner, 0) self.inners = npdelete(self.inners, delete_inner, 1)", "in arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior * channel_t[i]) return array(joint).T def _generate_posteriors(self): joint_t", "distribution. inners : numpy.ndarray Matrix of inner distributions. num_posteriors :", "joint.append(self.channel.secrets.prior * channel_t[i]) return array(joint).T def _generate_posteriors(self): joint_t = self.joint.T.copy()", "i in arange(self.channel.num_outputs): outer.append(joint_t[i].sum()) if outer[i] > 0: joint_t[i] =", "combination of others. Thus algorithm has time complexity of O(n*m^2)", "True # Delete inner j self.outer[i] += self.outer[j] # Merge", ":py:class:`.Channel` class. Once created an instance of :py:class:`.Hyper`, the constructor", "self._check_types(channel) self.channel = channel self.joint = self._generate_joint_distribution() self.outer, self.inners =", "self.channel.update_prior(prior) self.joint = self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors", "type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))): raise TypeError('The parameter \\'channel\\'", "zeros from numpy import delete as npdelete class Hyper: def", "prior): \"\"\"Update the prior distribution on set of secrets. The", "probability of occuring zero_prob = self.outer < epsilon self.outer =", "= len(self.outer) def update_prior(self, prior): \"\"\"Update the prior distribution on", "on the set of secrets. prior[i] is the probability of", "array, arange, zeros from numpy import delete as npdelete class", "TypeError('The parameter \\'channel\\' must be a core.channel.Channel object') def _generate_joint_distribution(self):", "number of secrets must match the current number of rows", "Channel from numpy import array, arange, zeros from numpy import", "of secrets and m is the number of outputs in", "of others. Thus algorithm has time complexity of O(n*m^2) where", "remove columns with zeros and merge columns that are a", "self.outer < epsilon self.outer = npdelete(self.outer, zero_prob, 0) self.inners =", "that contains only zeros and merge columns which one of", "secret. 
\"\"\" self.channel.update_prior(prior) self.joint = self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors()", "_generate_posteriors(self): joint_t = self.joint.T.copy() outer = [] for i in", "0) self.inners = npdelete(self.inners, zero_prob, 1) delete_inner = [False] *", "in arange(self.channel.num_outputs): outer.append(joint_t[i].sum()) if outer[i] > 0: joint_t[i] = joint_t[i]/outer[i]", "npdelete(self.outer, zero_prob, 0) self.inners = npdelete(self.inners, zero_prob, 1) delete_inner =", "joint, outer and inner distributions. Attributes ---------- channel : core.Channel", "if type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))): raise TypeError('The parameter", "is equal to inner j if (abs(self.inners[:,i] - self.inners[:,j]) <", "time complexity of O(n*m^2) where n is the number of", "labels[i] beeing the real secret. \"\"\" self.channel.update_prior(prior) self.joint = self._generate_joint_distribution()", "['y1'], array([[1],[1]]))): raise TypeError('The parameter \\'channel\\' must be a core.channel.Channel", "numpy import delete as npdelete class Hyper: def __init__(self, channel):", "of secrets. prior[i] is the probability of secret named labels[i]", "\"\"\" epsilon = 10**(-6) # Delete inners that have 0", "has time complexity of O(n*m^2) where n is the number", "arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior * channel_t[i]) return array(joint).T def _generate_posteriors(self): joint_t =", "i is equal to inner j if (abs(self.inners[:,i] - self.inners[:,j])", "inners that have 0 probability of occuring zero_prob = self.outer", "---------- channel : core.Channel Channel object. \"\"\" self._check_types(channel) self.channel =", ": numpy.ndarray Matrix of inner distributions. num_posteriors : int Number", "to have an instance of :py:class:`.Channel` class. Once created an", "the. \"\"\" epsilon = 10**(-6) # Delete inners that have", "create an instance of this class it is class it", "of outputs in the. \"\"\" epsilon = 10**(-6) # Delete", "j into inner i self.outer = npdelete(self.outer, delete_inner, 0) self.inners", ": core.Channel Channel object. joint : numpy.ndarray Matrix of joint", "\\'channel\\' must be a core.channel.Channel object') def _generate_joint_distribution(self): joint =", "contains only zeros and merge columns which one of them", "arange, zeros from numpy import delete as npdelete class Hyper:", "!= type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))): raise TypeError('The parameter \\'channel\\' must", "Hyper: def __init__(self, channel): \"\"\"Hyper-distribution. To create an instance of", "len(self.outer) def _check_types(self, channel): if type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'],", "i in arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior * channel_t[i]) return array(joint).T def _generate_posteriors(self):", "other. Parameters ---------- channel : core.Channel Channel object. \"\"\" self._check_types(channel)", "Attributes ---------- channel : core.Channel Channel object. joint : numpy.ndarray", "list, numpy.ndarray Prior distribution on the set of secrets. prior[i]", "Matrix of joint distribution. outer : numpy.ndarray Outer distribution. 
inners", "[] for i in arange(self.channel.num_outputs): outer.append(joint_t[i].sum()) if outer[i] > 0:", "of posterior distributions resulted by reducing the hyper-distribution, i.e., remove", "into inner i self.outer = npdelete(self.outer, delete_inner, 0) self.inners =", "def _generate_posteriors(self): joint_t = self.joint.T.copy() outer = [] for i", "numpy import array, arange, zeros from numpy import delete as", "an instance of this class it is class it is", "it is class it is necessary to have an instance", "from libqif.core.channel import Channel from numpy import array, arange, zeros", "= 10**(-6) # Delete inners that have 0 probability of", "self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def", "i.e., remove columns that contains only zeros and merge columns", "= [] for i in arange(self.channel.num_outputs): outer.append(joint_t[i].sum()) if outer[i] >", "self.outer[j] # Merge inner j into inner i self.outer =", "probability of secret named labels[i] beeing the real secret. \"\"\"", "j self.outer[i] += self.outer[j] # Merge inner j into inner", "constructor generates the joint, outer and inner distributions. Attributes ----------", "reducing the hyper-distribution, i.e., remove columns that contains only zeros", "the set of secrets. prior[i] is the probability of secret", "created an instance of :py:class:`.Hyper`, the constructor generates the joint,", "the current number of rows of the channel. Parameters ----------", "libqif.core.channel import Channel from numpy import array, arange, zeros from", "---------- prior : list, numpy.ndarray Prior distribution on the set", "combination of the other. Parameters ---------- channel : core.Channel Channel", "def _check_types(self, channel): if type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))):", "10**(-6) # Delete inners that have 0 probability of occuring", "prior distribution on set of secrets. The number of secrets", "zero_prob = self.outer < epsilon self.outer = npdelete(self.outer, zero_prob, 0)", "match the current number of rows of the channel. Parameters", "libqif.core.secrets import Secrets from libqif.core.channel import Channel from numpy import", "columns with zeros and merge columns that are a linear", "arange(i+1, self.inners.shape[1]): # Check if inner i is equal to", "[False] * len(self.outer) for i in arange(self.inners.shape[1]): for j in", "type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))): raise TypeError('The parameter \\'channel\\' must be", "outer and inner distributions. Attributes ---------- channel : core.Channel Channel", "= self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def update_prior(self, prior): \"\"\"Update", "= npdelete(self.inners, zero_prob, 1) delete_inner = [False] * len(self.outer) for", "the joint, outer and inner distributions. Attributes ---------- channel :", "# Delete inners that have 0 probability of occuring zero_prob", "npdelete class Hyper: def __init__(self, channel): \"\"\"Hyper-distribution. To create an", "\"\"\" self.channel.update_prior(prior) self.joint = self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors() self._reduce_hyper()", "numpy.ndarray Outer distribution. 
inners : numpy.ndarray Matrix of inner distributions.", "self.outer, self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def update_prior(self,", "in arange(self.inners.shape[1]): for j in arange(i+1, self.inners.shape[1]): # Check if", "number of outputs in the. \"\"\" epsilon = 10**(-6) #", "channel self.joint = self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors", "and inner distributions. Attributes ---------- channel : core.Channel Channel object.", "if outer[i] > 0: joint_t[i] = joint_t[i]/outer[i] return array(outer), joint_t.T", "channel : core.Channel Channel object. \"\"\" self._check_types(channel) self.channel = channel", "zero_prob, 1) delete_inner = [False] * len(self.outer) for i in", "array(joint).T def _generate_posteriors(self): joint_t = self.joint.T.copy() outer = [] for", "j in arange(i+1, self.inners.shape[1]): # Check if inner i is", "inner i is equal to inner j if (abs(self.inners[:,i] -", "joint distribution. outer : numpy.ndarray Outer distribution. inners : numpy.ndarray", "self.outer = npdelete(self.outer, zero_prob, 0) self.inners = npdelete(self.inners, zero_prob, 1)", "zeros and merge columns that are a linear combination of", "= channel self.joint = self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors() self._reduce_hyper()", "self.num_posteriors = len(self.outer) def _check_types(self, channel): if type(channel) != type(Channel(Secrets(['x1','x2'],", ": int Number of posterior distributions resulted by reducing the", "self.channel = channel self.joint = self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors()", "secrets and m is the number of outputs in the.", "only zeros and merge columns which one of them a", "is the probability of secret named labels[i] beeing the real", "\"\"\"Update the prior distribution on set of secrets. The number", "def update_prior(self, prior): \"\"\"Update the prior distribution on set of", "a linear combination of others. Thus algorithm has time complexity", "of them a linear combination of the other. Parameters ----------", "Check if inner i is equal to inner j if", "of secrets. The number of secrets must match the current", "self._reduce_hyper() self.num_posteriors = len(self.outer) def update_prior(self, prior): \"\"\"Update the prior", "class it is class it is necessary to have an", "of occuring zero_prob = self.outer < epsilon self.outer = npdelete(self.outer,", "current number of rows of the channel. Parameters ---------- prior", "= joint_t[i]/outer[i] return array(outer), joint_t.T def _reduce_hyper(self): \"\"\"Given the hyper-distribution", "0: joint_t[i] = joint_t[i]/outer[i] return array(outer), joint_t.T def _reduce_hyper(self): \"\"\"Given", "joint : numpy.ndarray Matrix of joint distribution. outer : numpy.ndarray", "import delete as npdelete class Hyper: def __init__(self, channel): \"\"\"Hyper-distribution.", "on set of secrets. The number of secrets must match", "beeing the real secret. \"\"\" self.channel.update_prior(prior) self.joint = self._generate_joint_distribution() self.outer,", "inner j into inner i self.outer = npdelete(self.outer, delete_inner, 0)", "delete_inner[j] = True # Delete inner j self.outer[i] += self.outer[j]", "array(outer), joint_t.T def _reduce_hyper(self): \"\"\"Given the hyper-distribution generated by _generate_posteriors", "merge columns that are a linear combination of others. 
Thus", "the probability of secret named labels[i] beeing the real secret.", "= self.outer < epsilon self.outer = npdelete(self.outer, zero_prob, 0) self.inners", "= self.joint.T.copy() outer = [] for i in arange(self.channel.num_outputs): outer.append(joint_t[i].sum())", "[] channel_t = self.channel.matrix.T for i in arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior *", "number of secrets and m is the number of outputs", "that are a linear combination of others. Thus algorithm has", "set of secrets. prior[i] is the probability of secret named", "parameter \\'channel\\' must be a core.channel.Channel object') def _generate_joint_distribution(self): joint", "Parameters ---------- channel : core.Channel Channel object. \"\"\" self._check_types(channel) self.channel", "of this class it is class it is necessary to", "hyper-distribution generated by _generate_posteriors remove columns with zeros and merge", "= [False] * len(self.outer) for i in arange(self.inners.shape[1]): for j", "instance of this class it is class it is necessary", "= True # Delete inner j self.outer[i] += self.outer[j] #", "the hyper-distribution, i.e., remove columns that contains only zeros and", "# Merge inner j into inner i self.outer = npdelete(self.outer,", "1) delete_inner = [False] * len(self.outer) for i in arange(self.inners.shape[1]):", "numpy.ndarray Matrix of joint distribution. outer : numpy.ndarray Outer distribution.", "self.channel.secrets.num_secrets: delete_inner[j] = True # Delete inner j self.outer[i] +=", "resulted by reducing the hyper-distribution, i.e., remove columns that contains", "columns that are a linear combination of others. Thus algorithm", "instance of :py:class:`.Hyper`, the constructor generates the joint, outer and", "int Number of posterior distributions resulted by reducing the hyper-distribution,", "this class it is class it is necessary to have", "self.channel.matrix.T for i in arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior * channel_t[i]) return array(joint).T", "posterior distributions resulted by reducing the hyper-distribution, i.e., remove columns", "import array, arange, zeros from numpy import delete as npdelete", "others. Thus algorithm has time complexity of O(n*m^2) where n", "n is the number of secrets and m is the", "numpy.ndarray Matrix of inner distributions. num_posteriors : int Number of", "for j in arange(i+1, self.inners.shape[1]): # Check if inner i", "must match the current number of rows of the channel.", "_generate_joint_distribution(self): joint = [] channel_t = self.channel.matrix.T for i in", "= [] channel_t = self.channel.matrix.T for i in arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior", "return array(joint).T def _generate_posteriors(self): joint_t = self.joint.T.copy() outer = []", "if inner i is equal to inner j if (abs(self.inners[:,i]", "joint_t.T def _reduce_hyper(self): \"\"\"Given the hyper-distribution generated by _generate_posteriors remove", "self.num_posteriors = len(self.outer) def update_prior(self, prior): \"\"\"Update the prior distribution", "must be a core.channel.Channel object') def _generate_joint_distribution(self): joint = []", "len(self.outer) for i in arange(self.inners.shape[1]): for j in arange(i+1, self.inners.shape[1]):", "class Hyper: def __init__(self, channel): \"\"\"Hyper-distribution. 
To create an instance", "self.inners.shape[1]): # Check if inner i is equal to inner", "> 0: joint_t[i] = joint_t[i]/outer[i] return array(outer), joint_t.T def _reduce_hyper(self):", "and merge columns which one of them a linear combination", "channel): \"\"\"Hyper-distribution. To create an instance of this class it", "raise TypeError('The parameter \\'channel\\' must be a core.channel.Channel object') def", "distribution. outer : numpy.ndarray Outer distribution. inners : numpy.ndarray Matrix", "a linear combination of the other. Parameters ---------- channel :", "object. joint : numpy.ndarray Matrix of joint distribution. outer :", "object. \"\"\" self._check_types(channel) self.channel = channel self.joint = self._generate_joint_distribution() self.outer,", "the constructor generates the joint, outer and inner distributions. Attributes", "generates the joint, outer and inner distributions. Attributes ---------- channel", "is the number of outputs in the. \"\"\" epsilon =", "object') def _generate_joint_distribution(self): joint = [] channel_t = self.channel.matrix.T for", "= npdelete(self.outer, zero_prob, 0) self.inners = npdelete(self.inners, zero_prob, 1) delete_inner", "outer.append(joint_t[i].sum()) if outer[i] > 0: joint_t[i] = joint_t[i]/outer[i] return array(outer),", "necessary to have an instance of :py:class:`.Channel` class. Once created", "self.inners = npdelete(self.inners, zero_prob, 1) delete_inner = [False] * len(self.outer)", "equal to inner j if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum()", "linear combination of the other. Parameters ---------- channel : core.Channel", "named labels[i] beeing the real secret. \"\"\" self.channel.update_prior(prior) self.joint =", "as npdelete class Hyper: def __init__(self, channel): \"\"\"Hyper-distribution. To create", "self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def update_prior(self, prior):", "Thus algorithm has time complexity of O(n*m^2) where n is", "in the. \"\"\" epsilon = 10**(-6) # Delete inners that", "array([[1],[1]]))): raise TypeError('The parameter \\'channel\\' must be a core.channel.Channel object')", "rows of the channel. Parameters ---------- prior : list, numpy.ndarray", ": numpy.ndarray Matrix of joint distribution. outer : numpy.ndarray Outer", "The number of secrets must match the current number of", "the prior distribution on set of secrets. The number of", "self.outer[i] += self.outer[j] # Merge inner j into inner i", "Once created an instance of :py:class:`.Hyper`, the constructor generates the", "with zeros and merge columns that are a linear combination", "Delete inner j self.outer[i] += self.outer[j] # Merge inner j", "O(n*m^2) where n is the number of secrets and m", "complexity of O(n*m^2) where n is the number of secrets", "= self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer)", "outer = [] for i in arange(self.channel.num_outputs): outer.append(joint_t[i].sum()) if outer[i]", "of :py:class:`.Hyper`, the constructor generates the joint, outer and inner", "outputs in the. \"\"\" epsilon = 10**(-6) # Delete inners", "delete_inner = [False] * len(self.outer) for i in arange(self.inners.shape[1]): for", "instance of :py:class:`.Channel` class. Once created an instance of :py:class:`.Hyper`,", "Outer distribution. inners : numpy.ndarray Matrix of inner distributions. 
num_posteriors", "# Delete inner j self.outer[i] += self.outer[j] # Merge inner", "prior[i] is the probability of secret named labels[i] beeing the", "by _generate_posteriors remove columns with zeros and merge columns that", "_check_types(self, channel): if type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))): raise", "Parameters ---------- prior : list, numpy.ndarray Prior distribution on the", "the channel. Parameters ---------- prior : list, numpy.ndarray Prior distribution", "inner j self.outer[i] += self.outer[j] # Merge inner j into", "columns that contains only zeros and merge columns which one", "_generate_posteriors remove columns with zeros and merge columns that are", "from libqif.core.secrets import Secrets from libqif.core.channel import Channel from numpy", "Channel object. joint : numpy.ndarray Matrix of joint distribution. outer", "class. Once created an instance of :py:class:`.Hyper`, the constructor generates", ": core.Channel Channel object. \"\"\" self._check_types(channel) self.channel = channel self.joint", "secret named labels[i] beeing the real secret. \"\"\" self.channel.update_prior(prior) self.joint", "self._reduce_hyper() self.num_posteriors = len(self.outer) def _check_types(self, channel): if type(channel) !=", "# Check if inner i is equal to inner j", "len(self.outer) def update_prior(self, prior): \"\"\"Update the prior distribution on set", "if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets: delete_inner[j] =", "< epsilon self.outer = npdelete(self.outer, zero_prob, 0) self.inners = npdelete(self.inners,", "update_prior(self, prior): \"\"\"Update the prior distribution on set of secrets.", "num_posteriors : int Number of posterior distributions resulted by reducing", "\"\"\" self._check_types(channel) self.channel = channel self.joint = self._generate_joint_distribution() self.outer, self.inners", "one of them a linear combination of the other. Parameters", "< epsilon).sum() == self.channel.secrets.num_secrets: delete_inner[j] = True # Delete inner", "channel : core.Channel Channel object. joint : numpy.ndarray Matrix of", "arange(self.channel.num_outputs): outer.append(joint_t[i].sum()) if outer[i] > 0: joint_t[i] = joint_t[i]/outer[i] return", "from numpy import array, arange, zeros from numpy import delete", "of secret named labels[i] beeing the real secret. \"\"\" self.channel.update_prior(prior)", "self.joint = self._generate_joint_distribution() self.outer, self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors =", "a core.channel.Channel object') def _generate_joint_distribution(self): joint = [] channel_t =", "that have 0 probability of occuring zero_prob = self.outer <", "secrets must match the current number of rows of the", "to inner j if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() ==", "of the channel. Parameters ---------- prior : list, numpy.ndarray Prior", "linear combination of others. Thus algorithm has time complexity of", "* len(self.outer) for i in arange(self.inners.shape[1]): for j in arange(i+1,", "secrets. The number of secrets must match the current number", "in arange(i+1, self.inners.shape[1]): # Check if inner i is equal", "core.Channel Channel object. 
\"\"\" self._check_types(channel) self.channel = channel self.joint =", "the number of secrets and m is the number of", "distributions resulted by reducing the hyper-distribution, i.e., remove columns that", "\"\"\"Given the hyper-distribution generated by _generate_posteriors remove columns with zeros", "of O(n*m^2) where n is the number of secrets and", "joint = [] channel_t = self.channel.matrix.T for i in arange(self.channel.num_outputs):", "0 probability of occuring zero_prob = self.outer < epsilon self.outer", "\"\"\"Hyper-distributions.\"\"\" from libqif.core.secrets import Secrets from libqif.core.channel import Channel from", "def _generate_joint_distribution(self): joint = [] channel_t = self.channel.matrix.T for i", "i self.outer = npdelete(self.outer, delete_inner, 0) self.inners = npdelete(self.inners, delete_inner,", "core.channel.Channel object') def _generate_joint_distribution(self): joint = [] channel_t = self.channel.matrix.T", "have 0 probability of occuring zero_prob = self.outer < epsilon", "self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def update_prior(self, prior): \"\"\"Update the", "channel): if type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))): raise TypeError('The", "def _reduce_hyper(self): \"\"\"Given the hyper-distribution generated by _generate_posteriors remove columns", "occuring zero_prob = self.outer < epsilon self.outer = npdelete(self.outer, zero_prob,", "To create an instance of this class it is class", "secrets. prior[i] is the probability of secret named labels[i] beeing", "epsilon = 10**(-6) # Delete inners that have 0 probability", "= self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def _check_types(self, channel): if", "inner distributions. num_posteriors : int Number of posterior distributions resulted", "the other. Parameters ---------- channel : core.Channel Channel object. \"\"\"", "- self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets: delete_inner[j] = True #", "where n is the number of secrets and m is", "set of secrets. The number of secrets must match the", "them a linear combination of the other. Parameters ---------- channel", "def __init__(self, channel): \"\"\"Hyper-distribution. To create an instance of this", "is the number of secrets and m is the number", "zero_prob, 0) self.inners = npdelete(self.inners, zero_prob, 1) delete_inner = [False]", "for i in arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior * channel_t[i]) return array(joint).T def", "distributions. Attributes ---------- channel : core.Channel Channel object. joint :", "generated by _generate_posteriors remove columns with zeros and merge columns", "of :py:class:`.Channel` class. Once created an instance of :py:class:`.Hyper`, the", "---------- channel : core.Channel Channel object. joint : numpy.ndarray Matrix", "of the other. Parameters ---------- channel : core.Channel Channel object.", "joint_t[i] = joint_t[i]/outer[i] return array(outer), joint_t.T def _reduce_hyper(self): \"\"\"Given the", "Delete inners that have 0 probability of occuring zero_prob =", "of secrets must match the current number of rows of", "of rows of the channel. 
Parameters ---------- prior : list,", "joint_t[i]/outer[i] return array(outer), joint_t.T def _reduce_hyper(self): \"\"\"Given the hyper-distribution generated", "epsilon self.outer = npdelete(self.outer, zero_prob, 0) self.inners = npdelete(self.inners, zero_prob,", "for i in arange(self.channel.num_outputs): outer.append(joint_t[i].sum()) if outer[i] > 0: joint_t[i]", "= len(self.outer) def _check_types(self, channel): if type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]),", "from numpy import delete as npdelete class Hyper: def __init__(self,", "the real secret. \"\"\" self.channel.update_prior(prior) self.joint = self._generate_joint_distribution() self.outer, self.inners", "real secret. \"\"\" self.channel.update_prior(prior) self.joint = self._generate_joint_distribution() self.outer, self.inners =", "channel_t = self.channel.matrix.T for i in arange(self.channel.num_outputs): joint.append(self.channel.secrets.prior * channel_t[i])", ": list, numpy.ndarray Prior distribution on the set of secrets.", "and m is the number of outputs in the. \"\"\"", "self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def _check_types(self, channel):", "outer[i] > 0: joint_t[i] = joint_t[i]/outer[i] return array(outer), joint_t.T def", "joint_t = self.joint.T.copy() outer = [] for i in arange(self.channel.num_outputs):", "return array(outer), joint_t.T def _reduce_hyper(self): \"\"\"Given the hyper-distribution generated by", "inner j if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets:", "of inner distributions. num_posteriors : int Number of posterior distributions", "inner i self.outer = npdelete(self.outer, delete_inner, 0) self.inners = npdelete(self.inners,", "distribution on set of secrets. The number of secrets must", "zeros and merge columns which one of them a linear", "Channel object. \"\"\" self._check_types(channel) self.channel = channel self.joint = self._generate_joint_distribution()", "self.outer, self.inners = self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def _check_types(self,", "of joint distribution. outer : numpy.ndarray Outer distribution. inners :", "outer : numpy.ndarray Outer distribution. inners : numpy.ndarray Matrix of", "Matrix of inner distributions. num_posteriors : int Number of posterior", "Number of posterior distributions resulted by reducing the hyper-distribution, i.e.,", "by reducing the hyper-distribution, i.e., remove columns that contains only", "number of rows of the channel. Parameters ---------- prior :", "_reduce_hyper(self): \"\"\"Given the hyper-distribution generated by _generate_posteriors remove columns with", "self._generate_posteriors() self._reduce_hyper() self.num_posteriors = len(self.outer) def _check_types(self, channel): if type(channel)", "the number of outputs in the. \"\"\" epsilon = 10**(-6)", "Secrets from libqif.core.channel import Channel from numpy import array, arange,", "inner distributions. Attributes ---------- channel : core.Channel Channel object. 
joint", "which one of them a linear combination of the other.", "npdelete(self.inners, zero_prob, 1) delete_inner = [False] * len(self.outer) for i", "and merge columns that are a linear combination of others.", "for i in arange(self.inners.shape[1]): for j in arange(i+1, self.inners.shape[1]): #", ":py:class:`.Hyper`, the constructor generates the joint, outer and inner distributions.", "arange(self.inners.shape[1]): for j in arange(i+1, self.inners.shape[1]): # Check if inner", "merge columns which one of them a linear combination of", "be a core.channel.Channel object') def _generate_joint_distribution(self): joint = [] channel_t", "is class it is necessary to have an instance of", "+= self.outer[j] # Merge inner j into inner i self.outer", "an instance of :py:class:`.Channel` class. Once created an instance of", "[1,0]), ['y1'], array([[1],[1]]))): raise TypeError('The parameter \\'channel\\' must be a", "delete as npdelete class Hyper: def __init__(self, channel): \"\"\"Hyper-distribution. To", "inners : numpy.ndarray Matrix of inner distributions. num_posteriors : int", "have an instance of :py:class:`.Channel` class. Once created an instance", "m is the number of outputs in the. \"\"\" epsilon", "__init__(self, channel): \"\"\"Hyper-distribution. To create an instance of this class", "it is necessary to have an instance of :py:class:`.Channel` class.", "epsilon).sum() == self.channel.secrets.num_secrets: delete_inner[j] = True # Delete inner j", "distributions. num_posteriors : int Number of posterior distributions resulted by", "are a linear combination of others. Thus algorithm has time", "channel. Parameters ---------- prior : list, numpy.ndarray Prior distribution on", "channel_t[i]) return array(joint).T def _generate_posteriors(self): joint_t = self.joint.T.copy() outer =", "core.Channel Channel object. joint : numpy.ndarray Matrix of joint distribution.", "== self.channel.secrets.num_secrets: delete_inner[j] = True # Delete inner j self.outer[i]", "class it is necessary to have an instance of :py:class:`.Channel`", "an instance of :py:class:`.Hyper`, the constructor generates the joint, outer", "(abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets: delete_inner[j] = True", "Merge inner j into inner i self.outer = npdelete(self.outer, delete_inner,", "\"\"\"Hyper-distribution. To create an instance of this class it is", "self.joint.T.copy() outer = [] for i in arange(self.channel.num_outputs): outer.append(joint_t[i].sum()) if", "self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets: delete_inner[j] = True # Delete", "i in arange(self.inners.shape[1]): for j in arange(i+1, self.inners.shape[1]): # Check", "import Secrets from libqif.core.channel import Channel from numpy import array,", "is necessary to have an instance of :py:class:`.Channel` class. Once", "the hyper-distribution generated by _generate_posteriors remove columns with zeros and", "j if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets: delete_inner[j]", "numpy.ndarray Prior distribution on the set of secrets. prior[i] is", ": numpy.ndarray Outer distribution. inners : numpy.ndarray Matrix of inner" ]
[ "- Number of failures before server is considered lost (1", "name. required: true type: str packet_size: description: - Packet size", "selection. type: str choices: - latency - jitter - packet-loss", "- Run as a local_action in your playbook requirements: -", "str choices: - enable - disable src6: description: - Source", "\"tos_mask\": {\"required\": False, \"type\": \"str\"}, \"users\": {\"required\": False, \"type\": \"list\",", "firewall.addrgrp.name)\" dst_negate: \"enable\" dst6: - name: \"default_name_64 (source firewall.address6.name firewall.addrgrp6.name)\"", "\"type\": \"int\"}, \"id\": {\"required\": True, \"type\": \"int\"}, \"input_device\": {\"required\": False,", "\"type\": \"int\"}, \"security_mode\": {\"required\": False, \"type\": \"str\", \"choices\": [\"none\", \"authentication\"]},", "- Internet Service group name. Source firewall.internet-service-group.name. required: true type:", "interface (0 - 4294967295). Used for SD-WAN rules or priority", "module.params['password'] is not None if not legacy_mode: if module._socket_path: connection", "of service evaluated bits. type: str users: description: - User", "values = percentage of link volume, 0 - 255). type:", "using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params", "str sample: \"17.0.2.10658\" serial: description: Serial number of the unit", "for latency (ms). type: int threshold_warning_packetloss: description: - Warning threshold", "description: - Port number used to communicate with the server", "description: - Measured volume ratio (this value / sum of", "higher weights. type: int service: description: - Create SD-WAN rules", "gateway: \"<your_own_value>\" gateway6: \"<your_own_value>\" ingress_spillover_threshold: \"42\" interface: \"<your_own_value> (source system.interface.name)\"", "\"enable\"]}, \"volume_ratio\": {\"required\": False, \"type\": \"int\"}, \"weight\": {\"required\": False, \"type\":", "{\"required\": True, \"type\": \"str\"} }}, \"dst_negate\": {\"required\": False, \"type\": \"str\",", "\"members\": {\"required\": False, \"type\": \"list\", \"options\": { \"comment\": {\"required\": False,", "use for load balancing Internet traffic to SD-WAN members. type:", "fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg=\"Error in repo\",", "Warning threshold for latency (ms). type: int threshold_warning_packetloss: description: -", "disable load_balance_mode: description: - Algorithm or mode to use for", "{\"required\": False, \"type\": \"int\"}, \"default\": {\"required\": False, \"type\": \"str\", \"choices\":", "SD-WAN verifies that the FortiGate can communicate with it. type:", "filter_system_virtual_wan_link_data(json): option_list = ['fail_alert_interfaces', 'fail_detect', 'health_check', 'load_balance_mode', 'members', 'service', 'status']", "This program is free software: you can redistribute it and/or", "int source: description: - Source IP address used in the", "FortiGate interfaces added to the virtual-wan-link. type: list suboptions: comment:", "False, \"type\": \"int\"}, \"latency_threshold\": {\"required\": False, \"type\": \"int\"}, \"link_cost_factor\": {\"required\":", "FortiOSHandler(connection) is_error, has_changed, result = fortios_system(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else:", "interfaces with higher weights. 
type: int service: description: - Create", "SD-WAN (formerly virtual WAN link) in Fortinet's FortiOS and FortiGate.", "author: - <NAME> (@mamunozgonzalez) - <NAME> (@thomnico) notes: - Requires", "\"enable\" dst6: - name: \"default_name_64 (source firewall.address6.name firewall.addrgrp6.name)\" end_port: \"65\"", "by Fortinet - Run as a local_action in your playbook", "or FortiGate password. type: str default: \"\" vdom: description: -", "description: - Link cost factor. type: str choices: - latency", "__future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc.", "\"70\" id: \"71\" input_device: - name: \"default_name_73 (source system.interface.name)\" internet_service:", "\"17.0.2.10658\" serial: description: Serial number of the unit returned: always", "into FortiGate returned: always type: str sample: 'PUT' http_status: description:", "\"recoverytime\": {\"required\": False, \"type\": \"int\"}, \"security_mode\": {\"required\": False, \"type\": \"str\",", "\"type\": \"int\"}, \"status\": {\"required\": False, \"type\": \"str\", \"choices\": [\"disable\", \"enable\"]},", "str sample: 'PUT' http_status: description: Last result given by FortiGate", "description: - Enable/disable SD-WAN Internet connection status checking (failure detection).", "required: true type: str dst_negate: description: - Enable/disable negation of", "\"str\", \"choices\": [\"none\", \"authentication\"]}, \"server\": {\"required\": False, \"type\": \"str\"}, \"sla\":", "str choices: - enable - disable tos: description: - Type", "proper CA. type: bool default: true version_added: 2.9 system_virtual_wan_link: description:", "the primary member (0 - 10000000). type: int id: description:", "False, \"type\": \"list\", \"options\": { \"name\": {\"required\": True, \"type\": \"str\"}", "= data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']:", "Warning threshold for packet loss (percentage). type: int update_cascade_interface: description:", "- enable volume_ratio: description: - Measured volume ratio (this value", "type: int threshold_warning_latency: description: - Warning threshold for latency (ms).", "enable - disable src6: description: - Source address6 name. type:", "required: true type: str health_check: description: - Health check. Source", "False, \"type\": \"str\"}, \"username\": {\"required\": False, \"type\": \"str\"}, \"password\": {\"required\":", "quality_link: \"98\" route_tag: \"99\" sla: - health_check: \"<your_own_value> (source system.virtual-wan-link.health-check.name)\"", "\"False\" tasks: - name: Configure redundant internet connections using SD-WAN", "address match. type: str choices: - enable - disable dst6:", "\"type\": \"str\", \"choices\": [\"ping\", \"tcp-echo\", \"udp-echo\", \"http\", \"twamp\", \"ping6\"]}, \"recoverytime\":", "\"type\": \"str\"}, \"spillover_threshold\": {\"required\": False, \"type\": \"int\"}, \"status\": {\"required\": False,", "- <NAME> (@thomnico) notes: - Requires fortiosapi library developed by", "for attribute in option_list: if attribute in json and json[attribute]", "- IP address or FQDN name of the server. 
type:", "- Algorithm or mode to use for load balancing Internet", "null type: dict suboptions: fail_alert_interfaces: description: - Physical interfaces that", "type: list suboptions: health_check: description: - Virtual WAN Link health-check.", "\"type\": \"str\"}, \"gateway\": {\"required\": False, \"type\": \"str\"}, \"gateway6\": {\"required\": False,", "- Indicates if the requests towards FortiGate must use HTTPS", "False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"dscp_forward_tag\": {\"required\": False, \"type\":", "False, \"type\": \"int\"}, \"update_cascade_interface\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\",", "and modify system feature and virtual_wan_link category. Examples include all", "to physical interfaces in the SD-WAN. type: list suboptions: addr_mode:", "\"health_check\": {\"required\": False, \"type\": \"str\"}, \"id\": {\"required\": False, \"type\": \"int\"}", "with FOS v6.0.5 version_added: \"2.8\" author: - <NAME> (@mamunozgonzalez) -", "level agreement (SLA). type: list suboptions: id: description: - SLA", "\"type\": \"bool\", \"default\": True}, \"system_virtual_wan_link\": { \"required\": False, \"type\": \"dict\",", "\"type\": \"list\", \"options\": { \"id\": {\"required\": True, \"type\": \"int\"} }},", "- enable - disable dscp_forward: description: - Enable/disable forward traffic", "[\"ping\", \"tcp-echo\", \"udp-echo\", \"http\", \"twamp\", \"ping6\"]}, \"recoverytime\": {\"required\": False, \"type\":", "type: str health_check: description: - Health check. Source system.virtual-wan-link.health-check.name. type:", "Path of the table used to fulfill the request returned:", "False, \"type\": \"int\"} }}, \"src\": {\"required\": False, \"type\": \"list\", \"options\":", "[\"enable\", \"disable\"]}, \"src6\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\":", "seq_num: \"96 (source system.virtual-wan-link.members.seq-num)\" protocol: \"97\" quality_link: \"98\" route_tag: \"99\"", "int latency_weight: description: - Coefficient of latency in the formula", "name: \"default_name_4 (source system.interface.name)\" fail_detect: \"enable\" health_check: - addr_mode: \"ipv4\"", "virtual WAN link) in Fortinet's FortiOS and FortiGate. description: -", "Configure redundant internet connections using SD-WAN (formerly virtual WAN link)", "internet_service_ctrl_group: description: - Control-based Internet Service group list. type: list", "2.9 system_virtual_wan_link: description: - Configure redundant internet connections using SD-WAN", "request returned: always type: str sample: \"urlfilter\" path: description: Path", "description: - Latency for SLA to make decision in milliseconds.", "Enable/disable SD-WAN service gateway. type: str choices: - enable -", "\"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"dscp_forward_tag\": {\"required\": False, \"type\": \"str\"},", "{\"required\": True, \"type\": \"int\"} }}, \"internet_service_ctrl_group\": {\"required\": False, \"type\": \"list\",", "sessions spill over to other interfaces in the SD-WAN. type:", "name: \"default_name_73 (source system.interface.name)\" internet_service: \"enable\" internet_service_ctrl: - id: \"76\"", "\"weight-based\", \"usage-based\", \"source-dest-ip-based\", \"measured-volume-based\"]}, \"members\": {\"required\": False, \"type\": \"list\", \"options\":", "type: list suboptions: id: description: - SLA ID. 
required: true", "Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_system(module.params, fos)", "{\"required\": False, \"type\": \"str\"}, \"spillover_threshold\": {\"required\": False, \"type\": \"int\"}, \"status\":", "to the server. type: str source6: description: - Source IPv6", "as default service. type: str choices: - enable - disable", "try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg=\"fortiosapi module is", "\"jitter_threshold\": {\"required\": False, \"type\": \"int\"}, \"latency_threshold\": {\"required\": False, \"type\": \"int\"},", "or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU", "WAN Link health-check. Source system.virtual-wan-link.health-check.name. type: str id: description: -", "FortiGate that can be configured and used as a different", "choices: - disable - enable volume_ratio: description: - Measured volume", "def fortios_system(data, fos): if data['system_virtual_wan_link']: resp = system_virtual_wan_link(data, fos) return", "type: str choices: - none - authentication server: description: -", "False, \"type\": \"str\", \"choices\": [\"ipv4\", \"ipv6\"]}, \"failtime\": {\"required\": False, \"type\":", "str required: false password: description: - FortiOS or FortiGate password.", "{\"required\": False, \"type\": \"int\"} }}, \"src\": {\"required\": False, \"type\": \"list\",", "{\"required\": False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"health_check\": {\"required\": False,", "False, \"type\": \"int\"} }}, \"protocol\": {\"required\": False, \"type\": \"int\"}, \"quality_link\":", "of the GNU General Public License as published by #", "- Group name. Source user.group.name. required: true type: str health_check:", "{\"required\": False, \"type\": \"int\"}, \"interface\": {\"required\": False, \"type\": \"str\"}, \"priority\":", "If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status':", "description: - IPv4 route map route-tag. type: int sla: description:", "ImportError: module.fail_json(msg=\"fortiosapi module is required\") fos = FortiOSAPI() login(module.params, fos)", "fields = { \"host\": {\"required\": False, \"type\": \"str\"}, \"username\": {\"required\":", "description: - URL used to communicate with the server if", "how SD-WAN verifies that the FortiGate can communicate with it.", "description: - Ingress spillover threshold for this interface (0 -", "- Enable/disable SD-WAN service. type: str choices: - enable -", "\"type\": \"str\"}, \"dst\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\":", "can redistribute it and/or modify # it under the terms", "firewall.internet-service-custom-group.name. required: true type: str internet_service_group: description: - Internet Service", "name: \"default_name_80 (source firewall.internet-service-custom.name)\" internet_service_custom_group: - name: \"default_name_82 (source firewall.internet-service-custom-group.name)\"", "Member sequence number. Source system.virtual-wan-link.members.seq-num. type: int protocol: description: -", "- Comments. 
type: str gateway: description: - The default gateway", "Name of the table used to fulfill the request returned:", "dictionary = {} for attribute in option_list: if attribute in", "jitter_weight: description: - Coefficient of jitter in the formula of", "- packet-loss packetloss_threshold: description: - Packet loss for SLA to", "This module is able to configure a FortiGate or FortiOS", "firewall.internet-service-custom.name)\" internet_service_custom_group: - name: \"default_name_82 (source firewall.internet-service-custom-group.name)\" internet_service_group: - name:", "type: str packet_loss_weight: description: - Coefficient of packet-loss in the", "of the operation's result returned: always type: str sample: \"success\"", "module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg=\"Error in repo\", meta=result) if __name__ ==", "system.virtual-wan-link.members.seq-num. type: int name: description: - Status check or health", "returned: always type: str sample: \"17.0.2.10658\" serial: description: Serial number", "a different unit. type: str default: root https: description: -", "\"list\", \"options\": { \"id\": {\"required\": True, \"type\": \"int\"} }}, \"jitter_weight\":", "type: str internet_service_custom: description: - Custom Internet service name list.", "by a proper CA. type: bool default: true version_added: 2.9", "\"ipv6\"]}, \"bandwidth_weight\": {\"required\": False, \"type\": \"int\"}, \"default\": {\"required\": False, \"type\":", "ID. required: true type: int jitter_threshold: description: - Jitter for", "enable - disable internet_service_ctrl: description: - Control-based Internet Service ID", "rule name. type: str packet_loss_weight: description: - Coefficient of packet-loss", "or \\ status['http_method'] == \"DELETE\" and status['http_status'] == 404 def", "fortios_system(data, fos): if data['system_virtual_wan_link']: resp = system_virtual_wan_link(data, fos) return not", "- enable - disable src6: description: - Source address6 name.", "protocol: \"97\" quality_link: \"98\" route_tag: \"99\" sla: - health_check: \"<your_own_value>", "list suboptions: name: description: - Custom Internet Service group name.", "False, \"type\": \"bool\", \"default\": True}, \"ssl_verify\": {\"required\": False, \"type\": \"bool\",", "{\"required\": False, \"type\": \"int\"}, \"link_cost_factor\": {\"required\": False, \"type\": \"str\", \"choices\":", "firewall.addrgrp6.name)\" start_port: \"108\" status: \"enable\" tos: \"<your_own_value>\" tos_mask: \"<your_own_value>\" users:", "or health checking. Identify a server on the Internet and", "True, \"type\": \"str\"} }}, \"internet_service_custom_group\": {\"required\": False, \"type\": \"list\", \"options\":", "import FortiOSAPI except ImportError: module.fail_json(msg=\"fortiosapi module is required\") fos =", "type: str sample: \"success\" vdom: description: Virtual domain used returned:", "description: Serial number of the unit returned: always type: str", "list suboptions: seq_num: description: - Member sequence number. Source system.virtual-wan-link.members.seq-num.", "= {} for attribute in option_list: if attribute in json", "link_cost_factor: \"latency\" packetloss_threshold: \"28\" threshold_alert_jitter: \"29\" threshold_alert_latency: \"30\" threshold_alert_packetloss: \"31\"", "suboptions: name: description: - Control-based Internet Service group name. Source", "description: - Custom Internet Service group list. type: list suboptions:", "service for application-based load balancing. 
type: str choices: - enable", "the License, or # (at your option) any later version.", "list suboptions: name: description: - Address or address group name.", "\"102\" src: - name: \"default_name_104 (source firewall.address.name firewall.addrgrp.name)\" src_negate: \"enable\"", "PARTICULAR PURPOSE. See the # GNU General Public License for", "- 3600 sec). type: int members: description: - Member sequence", "\"threshold_warning_latency\": {\"required\": False, \"type\": \"int\"}, \"threshold_warning_packetloss\": {\"required\": False, \"type\": \"int\"},", "name: description: - Status check or health check name. required:", "to use for load balancing Internet traffic to SD-WAN members.", "members: description: - Member sequence number list. type: list suboptions:", "the formula of custom-profile-1. type: int latency_weight: description: - Coefficient", "to FortiGate returned: success type: str sample: \"id\" name: description:", "False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"dscp_forward\": {\"required\": False, \"type\":", "description: - Enable/disable SD-WAN service. type: str choices: - enable", "route regeneration (0 - 10000000). type: int member: description: -", "\"default_name_64 (source firewall.address6.name firewall.addrgrp6.name)\" end_port: \"65\" gateway: \"enable\" groups: -", "fulfill the request returned: always type: str sample: \"urlfilter\" path:", "(0 - 10000000). type: int latency_threshold: description: - Latency for", "\"type\": \"int\"}, \"threshold_alert_packetloss\": {\"required\": False, \"type\": \"int\"}, \"threshold_warning_jitter\": {\"required\": False,", "= fortios_system(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else:", "type: int jitter_threshold: description: - Jitter for SLA to make", "SD-WAN. type: int interface: description: - Interface name. Source system.interface.name.", "name: \"default_name_61 (source firewall.address.name firewall.addrgrp.name)\" dst_negate: \"enable\" dst6: - name:", "link_cost_factor: \"latency\" link_cost_threshold: \"90\" member: \"91\" mode: \"auto\" name: \"default_name_93\"", "terms of the GNU General Public License as published by", "\"protocol\": {\"required\": False, \"type\": \"str\", \"choices\": [\"ping\", \"tcp-echo\", \"udp-echo\", \"http\",", "- enable - disable dscp_forward_tag: description: - Forward traffic DSCP", "server. type: str source6: description: - Source IPv6 address used", "- Percentage threshold change of link cost values that will", "http_get: description: - URL used to communicate with the server", "module.fail_json(msg=\"fortiosapi module is required\") fos = FortiOSAPI() login(module.params, fos) is_error,", "False, \"type\": \"int\"}, \"seq_num\": {\"required\": False, \"type\": \"int\"}, \"source\": {\"required\":", "type: str choices: - enable - disable update_static_route: description: -", "ingress_spillover_threshold: description: - Ingress spillover threshold for this interface (0", "{\"required\": False, \"type\": \"int\"}, \"route_tag\": {\"required\": False, \"type\": \"int\"}, \"sla\":", "\"<your_own_value>\" tos_mask: \"<your_own_value>\" users: - name: \"default_name_113 (source user.local.name)\" status:", "Priority rule ID (1 - 4000). required: true type: int", "and virtual_wan_link category. Examples include all parameters and values need", "0 - 255). type: int weight: description: - Weight of", "the formula of custom-profile-1. 
type: int link_cost_factor: description: - Link", "\"88\" link_cost_factor: \"latency\" link_cost_threshold: \"90\" member: \"91\" mode: \"auto\" name:", "}}, \"load_balance_mode\": {\"required\": False, \"type\": \"str\", \"choices\": [\"source-ip-based\", \"weight-based\", \"usage-based\",", "True, \"type\": \"str\"} }}, \"internet_service_id\": {\"required\": False, \"type\": \"list\", \"options\":", "agreement (SLA). type: list suboptions: health_check: description: - Virtual WAN", "- disable groups: description: - User groups. type: list suboptions:", "- name: \"default_name_107 (source firewall.address6.name firewall.addrgrp6.name)\" start_port: \"108\" status: \"enable\"", "{\"required\": False, \"type\": \"int\"}, \"threshold_alert_latency\": {\"required\": False, \"type\": \"int\"}, \"threshold_alert_packetloss\":", "- Priority of the interface (0 - 4294967295). Used for", "- Measured volume ratio (this value / sum of all", "ID list. type: list suboptions: id: description: - Control-based Internet", "list. type: list suboptions: name: description: - Control-based Internet Service", "Requires fortiosapi library developed by Fortinet - Run as a", "int link_cost_factor: description: - Link cost factor. type: str choices:", "json and json[attribute] is not None: dictionary[attribute] = json[attribute] return", "\"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"internet_service_ctrl\": {\"required\": False, \"type\": \"list\",", "Enable/disable SD-WAN service. type: str choices: - enable - disable", "FortiOS or FortiGate username. type: str required: false password: description:", "\"choices\": [\"ipv4\", \"ipv6\"]}, \"failtime\": {\"required\": False, \"type\": \"int\"}, \"http_agent\": {\"required\":", "of failures before server is considered lost (1 - 3600).", "{ \"required\": False, \"type\": \"dict\", \"default\": None, \"options\": { \"fail_alert_interfaces\":", "IPv6 address used in the health-check packet to the server.", "\"\", \"no_log\": True}, \"vdom\": {\"required\": False, \"type\": \"str\", \"default\": \"root\"},", "physical interfaces in the SD-WAN. type: list suboptions: addr_mode: description:", "- Source address6 name. type: list suboptions: name: description: -", "service name list. type: list suboptions: name: description: - Custom", "\"int\"} }}, \"protocol\": {\"required\": False, \"type\": \"int\"}, \"quality_link\": {\"required\": False,", "is_successful_status(resp), \\ resp['status'] == \"success\", \\ resp def main(): fields", "configured and used as a different unit. type: str default:", "\"type\": \"int\"} }}, \"protocol\": {\"required\": False, \"type\": \"int\"}, \"quality_link\": {\"required\":", "id: description: - SLA ID. required: true type: int jitter_threshold:", "type: str id: description: - SLA ID. type: int src:", "root https: description: - Indicates if the requests towards FortiGate", "type: str sample: '1547' http_method: description: Last method used to", "traffic volume threshold is reached, new sessions spill over to", "in the SD-WAN. type: int interface: description: - Interface name.", "'username' in module.params and module.params['username'] is not None and \\", "default: null type: dict suboptions: fail_alert_interfaces: description: - Physical interfaces", "ssl_verify: description: - Ensures FortiGate certificate must be verified by", "name. Source firewall.address.name firewall.addrgrp.name. required: true type: str dst_negate: description:", "considered lost (1 - 3600). 
type: int http_agent: description: -", "used in the last call to FortiGate returned: success type:", "(this value / sum of all values = percentage of", "users: - name: \"default_name_113 (source user.local.name)\" status: \"disable\" ''' RETURN", "End destination port number. type: int gateway: description: - Enable/disable", "link selection. type: str choices: - latency - jitter -", "cost values that will result in policy route regeneration (0", "{ \"name\": {\"required\": True, \"type\": \"str\"} }}, \"internet_service_id\": {\"required\": False,", "server is considered lost (1 - 3600). type: int http_agent:", "description: - Criteria on which to base link selection. type:", "policy route regeneration (0 - 10000000). type: int member: description:", "this interface. Usually the default gateway of the Internet service", "to interfaces with higher weights. type: int service: description: -", "switching from the back-up member to the primary member (0", "data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_system_virtual_wan_link_data(json):", "type: str choices: - latency - jitter - packet-loss -", "to provision the content into FortiGate returned: always type: str", "\"51\" service: - addr_mode: \"ipv4\" bandwidth_weight: \"54\" default: \"enable\" dscp_forward:", "\"options\": { \"name\": {\"required\": True, \"type\": \"str\"} }}, \"internet_service_group\": {\"required\":", "\"str\"}, \"packet_size\": {\"required\": False, \"type\": \"int\"}, \"password\": {\"required\": False, \"type\":", "\"packet-loss\"]}, \"packetloss_threshold\": {\"required\": False, \"type\": \"int\"} }}, \"threshold_alert_jitter\": {\"required\": False,", "__metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version':", "either version 3 of the License, or # (at your", "User groups. type: list suboptions: name: description: - Group name.", "for this interface. Usually the default gateway of the Internet", "- Egress spillover threshold for this interface (0 - 16776000", "Copyright 2019 Fortinet, Inc. # # This program is free", "str packet_size: description: - Packet size of a twamp test", "\"latency\" link_cost_threshold: \"90\" member: \"91\" mode: \"auto\" name: \"default_name_93\" packet_loss_weight:", "required: false username: description: - FortiOS or FortiGate username. type:", "(ms). type: int threshold_warning_latency: description: - Warning threshold for latency", "FortiOS or FortiGate IP address. type: str required: false username:", "group name. Source firewall.address6.name firewall.addrgrp6.name. required: true type: str end_port:", "\"34\" update_cascade_interface: \"enable\" update_static_route: \"enable\" load_balance_mode: \"source-ip-based\" members: - comment:", "description: - Physical interfaces that will be alerted. type: list", "or IPv6). type: str choices: - ipv4 - ipv6 failtime:", "username }}\" password: \"{{ password }}\" vdom: \"{{ vdom }}\"", "description: - Enable/disable update cascade interface. type: str choices: -", "before usage. 
Tested with FOS v6.0.5 version_added: \"2.8\" author: -", "groups: - name: \"default_name_68 (source user.group.name)\" health_check: \"<your_own_value> (source system.virtual-wan-link.health-check.name)\"", "False, \"type\": \"str\", \"choices\": [\"disable\", \"enable\"]}, \"volume_ratio\": {\"required\": False, \"type\":", "\"str\", \"choices\": [\"enable\", \"disable\"]}, \"tos\": {\"required\": False, \"type\": \"str\"}, \"tos_mask\":", "\"dst\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\": {\"required\": True,", "\"<your_own_value>\" users: - name: \"default_name_113 (source user.local.name)\" status: \"disable\" '''", "{ \"comment\": {\"required\": False, \"type\": \"str\"}, \"gateway\": {\"required\": False, \"type\":", "- Control-based Internet Service ID. required: true type: int internet_service_ctrl_group:", "type: list suboptions: name: description: - Physical interface name. Source", "str http_match: description: - Response string expected from the server", "Type of service evaluated bits. type: str users: description: -", "str priority: description: - Priority of the interface (0 -", "\"outbandwidth\", \"bibandwidth\", \"custom-profile-1\"]}, \"link_cost_threshold\": {\"required\": False, \"type\": \"int\"}, \"member\": {\"required\":", "type: str sample: \"200\" mkey: description: Master key (id) used", "type: str required: false username: description: - FortiOS or FortiGate", "data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify']", "server over the selected protocol. type: int protocol: description: -", "Type of service bit pattern. type: str tos_mask: description: -", "to the server (1 - 3600 sec). type: int members:", "{\"required\": False, \"type\": \"str\"}, \"port\": {\"required\": False, \"type\": \"int\"}, \"protocol\":", "formula of custom-profile-1. type: int default: description: - Enable/disable use", "\"str\", \"choices\": [\"latency\", \"jitter\", \"packet-loss\", \"inbandwidth\", \"outbandwidth\", \"bibandwidth\", \"custom-profile-1\"]}, \"link_cost_threshold\":", "user.group.name)\" health_check: \"<your_own_value> (source system.virtual-wan-link.health-check.name)\" hold_down_time: \"70\" id: \"71\" input_device:", "import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG", "for SLA to make decision in milliseconds. (0 - 10000000).", "\"type\": \"list\", \"options\": { \"name\": {\"required\": True, \"type\": \"str\"} }},", "False, \"type\": \"str\", \"default\": \"\", \"no_log\": True}, \"vdom\": {\"required\": False,", "False, \"type\": \"int\"} }}, \"service\": {\"required\": False, \"type\": \"list\", \"options\":", "system.virtual-wan-link.members.seq-num)\" name: \"default_name_15\" packet_size: \"16\" password: \"<<PASSWORD>>\" port: \"18\" protocol:", "fos): vdom = data['vdom'] system_virtual_wan_link_data = data['system_virtual_wan_link'] filtered_data = underscore_to_hyphen(filter_system_virtual_wan_link_data(system_virtual_wan_link_data))", "False, \"type\": \"int\"}, \"security_mode\": {\"required\": False, \"type\": \"str\", \"choices\": [\"none\",", "- priority - sla name: description: - Priority rule name.", "\"measured-volume-based\"]}, \"members\": {\"required\": False, \"type\": \"list\", \"options\": { \"comment\": {\"required\":", "{ \"name\": {\"required\": True, \"type\": \"str\"} }}, \"src_negate\": {\"required\": False,", "sequence number. 
Source system.virtual-wan-link.members.seq-num. type: int name: description: - Status", "{\"required\": False, \"type\": \"bool\", \"default\": True}, \"system_virtual_wan_link\": { \"required\": False,", "False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"src6\": {\"required\": False, \"type\":", "the request returned: always type: str sample: \"webfilter\" revision: description:", "type: str port: description: - Port number used to communicate", "dst6: - name: \"default_name_64 (source firewall.address6.name firewall.addrgrp6.name)\" end_port: \"65\" gateway:", "virtual_wan_link category. Examples include all parameters and values need to", "HTTP header. type: str http_get: description: - URL used to", "- Priority rule ID (1 - 4000). required: true type:", "module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi", "ID. type: int src: description: - Source address name. type:", "rules or priority rules (also called services) to control how", "description: - This module is able to configure a FortiGate", "type: str start_port: description: - Start destination port number. type:", "firewall.internet-service-custom.name. required: true type: str internet_service_custom_group: description: - Custom Internet", "tasks: - name: Configure redundant internet connections using SD-WAN (formerly", "Service ID list. type: list suboptions: id: description: - Control-based", "Alert threshold for packet loss (percentage). type: int threshold_warning_jitter: description:", "# but WITHOUT ANY WARRANTY; without even the implied warranty", "description: - Virtual domain, among those defined previously. A vdom", "security_mode: description: - Twamp controller security mode. type: str choices:", "port number. type: int status: description: - Enable/disable SD-WAN service.", "\"sla\": {\"required\": False, \"type\": \"list\", \"options\": { \"id\": {\"required\": True,", "a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host:", "(formerly virtual WAN link). default: null type: dict suboptions: fail_alert_interfaces:", "of destination address match. type: str choices: - enable -", "ID (1 - 4000). required: true type: int input_device: description:", "\"no_log\": True}, \"vdom\": {\"required\": False, \"type\": \"str\", \"default\": \"root\"}, \"https\":", "\"192.168.122.40\" username: \"admin\" password: \"\" vdom: \"root\" ssl_verify: \"False\" tasks:", "\"seq_num\": {\"required\": False, \"type\": \"int\"} }}, \"name\": {\"required\": True, \"type\":", "\"type\": \"str\"}, \"hold_down_time\": {\"required\": False, \"type\": \"int\"}, \"id\": {\"required\": True,", "This program is distributed in the hope that it will", "{\"required\": False, \"type\": \"int\"}, \"id\": {\"required\": True, \"type\": \"int\"}, \"input_device\":", "\"internet_service_custom\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\": {\"required\": True,", "ssl_verify: \"False\" tasks: - name: Configure redundant internet connections using", "in authentication mode type: str port: description: - Port number", "type: str http_match: description: - Response string expected from the", "vdom: \"root\" ssl_verify: \"False\" tasks: - name: Configure redundant internet", "\"packet_size\": {\"required\": False, \"type\": \"int\"}, \"password\": {\"required\": False, \"type\": \"str\"},", "health check name. required: true type: str packet_size: description: -", "threshold for packet loss (percentage). 
type: int threshold_warning_jitter: description: -", "False, \"type\": \"bool\", \"default\": True}, \"system_virtual_wan_link\": { \"required\": False, \"type\":", "checking or health checking. Identify a server on the Internet", "supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI", "- Control-based Internet Service group name. Source application.group.name. required: true", "or address6 group name. Source firewall.address6.name firewall.addrgrp6.name. required: true type:", "not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg=\"Error in repo\", meta=result) if", "for packet loss (percentage). type: int update_cascade_interface: description: - Enable/disable", "IPv6). type: str choices: - ipv4 - ipv6 bandwidth_weight: description:", "description: - Source address name. type: list suboptions: name: description:", "source address match. type: str choices: - enable - disable", "and \\ 'username' in module.params and module.params['username'] is not None", "(failure detection). type: str choices: - enable - disable health_check:", "Member sequence number. type: int mode: description: - Control how", "status: description: Indication of the operation's result returned: always type:", "return not is_successful_status(resp), \\ resp['status'] == \"success\", \\ resp def", "Source firewall.address6.name firewall.addrgrp6.name. required: true type: str end_port: description: -", "\"<your_own_value> (source system.interface.name)\" priority: \"44\" seq_num: \"45\" source: \"<your_own_value>\" source6:", "\"port\": {\"required\": False, \"type\": \"int\"}, \"protocol\": {\"required\": False, \"type\": \"str\",", "reverse traffic DSCP tag. type: str choices: - enable -", "\"list\", \"options\": { \"name\": {\"required\": True, \"type\": \"str\"} }}, \"start_port\":", "}}\" https: \"False\" system_virtual_wan_link: fail_alert_interfaces: - name: \"default_name_4 (source system.interface.name)\"", "\"enable\" tos: \"<your_own_value>\" tos_mask: \"<your_own_value>\" users: - name: \"default_name_113 (source", "description: - Twamp controller password in authentication mode type: str", "str sample: \"id\" name: description: Name of the table used", "description: - Waiting period in seconds when switching from the", "{\"required\": False, \"type\": \"int\"}, \"threshold_warning_latency\": {\"required\": False, \"type\": \"int\"}, \"threshold_warning_packetloss\":", "of the License, or # (at your option) any later", "fortiosapi library developed by Fortinet - Run as a local_action", "user.local.name)\" status: \"disable\" ''' RETURN = ''' build: description: Build", "attribute in option_list: if attribute in json and json[attribute] is", "number used to communicate with the server over the selected", "{\"required\": False, \"type\": \"int\"}, \"threshold_warning_packetloss\": {\"required\": False, \"type\": \"int\"}, \"update_cascade_interface\":", "'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_system_virtual_wan_link short_description:", "table used to fulfill the request returned: always type: str", "\"gateway6\": {\"required\": False, \"type\": \"str\"}, \"ingress_spillover_threshold\": {\"required\": False, \"type\": \"int\"},", "sample: \"urlfilter\" path: description: Path of the table used to", "{\"required\": True, \"type\": \"str\"} }}, \"internet_service_id\": {\"required\": False, \"type\": \"list\",", "over to other interfaces in the SD-WAN. 
type: int interface:", "your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS", "[\"enable\", \"disable\"]}, \"tos\": {\"required\": False, \"type\": \"str\"}, \"tos_mask\": {\"required\": False,", "type: str http_get: description: - URL used to communicate with", "\"src\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\": {\"required\": True,", "description: - Forward traffic DSCP tag. type: str dscp_reverse: description:", "mode to use for load balancing Internet traffic to SD-WAN", "DSCP tag. type: str choices: - enable - disable dscp_reverse_tag:", "\"manual\", \"priority\", \"sla\"]}, \"name\": {\"required\": False, \"type\": \"str\"}, \"packet_loss_weight\": {\"required\":", "name. Source system.interface.name. required: true type: str internet_service: description: -", "description: - Enable/disable forward traffic DSCP tag. type: str choices:", "from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet,", "link_cost_threshold: \"90\" member: \"91\" mode: \"auto\" name: \"default_name_93\" packet_loss_weight: \"94\"", "in the health-check packet to the server. type: str spillover_threshold:", "tos_mask: \"<your_own_value>\" users: - name: \"default_name_113 (source user.local.name)\" status: \"disable\"", "group list. type: list suboptions: name: description: - Control-based Internet", "returned: always type: str sample: \"success\" vdom: description: Virtual domain", "'virtual-wan-link', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status'] == \"success\" or", "Enable/disable updating the static route. type: str choices: - enable", "or address group name. Source firewall.address.name firewall.addrgrp.name. required: true type:", "\"type\": \"int\"}, \"password\": {\"required\": False, \"type\": \"str\"}, \"port\": {\"required\": False,", "the back-up member to the primary member (0 - 10000000).", "type: int status: description: - Enable/disable this interface in the", "application-based load balancing. type: str choices: - enable - disable", "(source system.virtual-wan-link.health-check.name)\" id: \"102\" src: - name: \"default_name_104 (source firewall.address.name", "General Public License as published by # the Free Software", "Source firewall.address.name firewall.addrgrp.name. required: true type: str dst_negate: description: -", "not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def", "will be useful, # but WITHOUT ANY WARRANTY; without even", "and determine how SD-WAN verifies that the FortiGate can communicate", "see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by':", "type: str sample: \"FGVMEVYYQT3AB5352\" status: description: Indication of the operation's", "\"threshold_alert_latency\": {\"required\": False, \"type\": \"int\"}, \"threshold_alert_packetloss\": {\"required\": False, \"type\": \"int\"},", "is a virtual instance of the FortiGate that can be", "seq_num: description: - Member sequence number. Source system.virtual-wan-link.members.seq-num. type: int", "str choices: - ping - tcp-echo - udp-echo - http", "server is considered recovered (1 - 3600). type: int security_mode:", "<NAME> (@thomnico) notes: - Requires fortiosapi library developed by Fortinet", "system_virtual_wan_link(data, fos) return not is_successful_status(resp), \\ resp['status'] == \"success\", \\", "password. 
[Overlapping n-gram fragments of the Ansible module fortios_system_virtual_wan_link (Copyright 2019 Fortinet, Inc., GPL v3 or later): a FortiOS module for configuring redundant internet connections with SD-WAN (formerly virtual WAN link) on FortiGate/FortiOS devices, tested with FOS v6.0.5, version_added 2.8, requiring fortiosapi>=0.9.8. The recoverable pieces are: the DOCUMENTATION block with the connection options (host, username, password, vdom, https, ssl_verify) and the system_virtual_wan_link dictionary (fail_alert_interfaces, fail_detect, health_check, load_balance_mode, members, service, status, each with its suboptions); the EXAMPLES block with a localhost playbook task that sets the full parameter tree; the RETURN block (build, http_method, http_status, mkey, name, path, revision, serial, status, vdom, version); and the module code itself (login, filter_system_virtual_wan_link_data, underscore_to_hyphen, system_virtual_wan_link, is_successful_status, fortios_system, and main with its argument-spec fields, the HTTPAPI Connection/FortiOSHandler path, and a legacy fortiosapi fallback).]
type: int security_mode: description: -", "firewall.address6.name firewall.addrgrp6.name. required: true type: str end_port: description: - End", "\"str\", \"choices\": [\"ipv4\", \"ipv6\"]}, \"failtime\": {\"required\": False, \"type\": \"int\"}, \"http_agent\":", "\"Comments.\" gateway: \"<your_own_value>\" gateway6: \"<your_own_value>\" ingress_spillover_threshold: \"42\" interface: \"<your_own_value> (source", "fos.login(host, username, password, verify=ssl_verify) def filter_system_virtual_wan_link_data(json): option_list = ['fail_alert_interfaces', 'fail_detect',", "data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data", "description: - Alert threshold for latency (ms). type: int threshold_alert_packetloss:", "(0 - 10000000). type: int id: description: - Priority rule", "mkey: description: Master key (id) used in the last call", "type: int input_device: description: - Source interface name. type: list", "str fail_detect: description: - Enable/disable SD-WAN Internet connection status checking", "description: - SLA ID. required: true type: int jitter_threshold: description:", "enable ''' EXAMPLES = ''' - hosts: localhost vars: host:", "it under the terms of the GNU General Public License", "mode type: str port: description: - Port number used to", "the operation's result returned: always type: str sample: \"success\" vdom:", "false username: description: - FortiOS or FortiGate username. type: str", "program is distributed in the hope that it will be", "\"int\"}, \"latency_weight\": {\"required\": False, \"type\": \"int\"}, \"link_cost_factor\": {\"required\": False, \"type\":", "GNU General Public License # along with this program. If", "Sequence number(1-255). type: int source: description: - Source IP address", "\"type\": \"int\"}, \"members\": {\"required\": False, \"type\": \"list\", \"options\": { \"seq_num\":", "FortiGate username. type: str required: false password: description: - FortiOS", "sla: description: - Service level agreement (SLA). type: list suboptions:", "[\"none\", \"authentication\"]}, \"server\": {\"required\": False, \"type\": \"str\"}, \"sla\": {\"required\": False,", "ratio (this value / sum of all values = percentage", "description: - Virtual WAN Link health-check. Source system.virtual-wan-link.health-check.name. type: str", "return fos.set('system', 'virtual-wan-link', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status'] ==", "formula of custom-profile-1. type: int priority_members: description: - Member sequence", "import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host", "\"health_check\": {\"required\": False, \"type\": \"list\", \"options\": { \"addr_mode\": {\"required\": False,", "False, \"type\": \"int\"}, \"default\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\",", "}}, \"internet_service_ctrl_group\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\": {\"required\":", "FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General", "other interfaces in the SD-WAN. type: int status: description: -", "Create SD-WAN rules or priority rules (also called services) to", "service ID. Source firewall.internet-service.id. required: true type: int jitter_weight: description:", "Internet Service group list. 
EXAMPLES = '''
- hosts: localhost
  vars:
   host: "192.168.122.40"
   username: "admin"
   password: ""
   vdom: "root"
   ssl_verify: "False"
  tasks:
  - name: Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
    fortios_system_virtual_wan_link:
      host:  "{{ host }}"
      username: "{{ username }}"
      password: "{{ password }}"
      vdom:  "{{ vdom }}"
      https: "False"
      system_virtual_wan_link:
        fail_alert_interfaces:
         - name: "default_name_4 (source system.interface.name)"
        fail_detect: "enable"
        health_check:
         - addr_mode: "ipv4"
           failtime: "8"
           http_agent: "<your_own_value>"
           http_get: "<your_own_value>"
           http_match: "<your_own_value>"
           interval: "12"
           members:
            - seq_num: "14 (source system.virtual-wan-link.members.seq-num)"
           name: "default_name_15"
           packet_size: "16"
           password: "<<PASSWORD>>"
           port: "18"
           protocol: "ping"
           recoverytime: "20"
           security_mode: "none"
           server: "192.168.100.100"
           sla:
            - id: "24"
              jitter_threshold: "25"
              latency_threshold: "26"
              link_cost_factor: "latency"
              packetloss_threshold: "28"
           threshold_alert_jitter: "29"
           threshold_alert_latency: "30"
           threshold_alert_packetloss: "31"
           threshold_warning_jitter: "32"
           threshold_warning_latency: "33"
           threshold_warning_packetloss: "34"
           update_cascade_interface: "enable"
           update_static_route: "enable"
        load_balance_mode: "source-ip-based"
        members:
         - comment: "Comments."
           gateway: "<your_own_value>"
           gateway6: "<your_own_value>"
           ingress_spillover_threshold: "42"
           interface: "<your_own_value> (source system.interface.name)"
           priority: "44"
           seq_num: "45"
           source: "<your_own_value>"
           source6: "<your_own_value>"
           spillover_threshold: "48"
           status: "disable"
           volume_ratio: "50"
           weight: "51"
        service:
         - addr_mode: "ipv4"
           bandwidth_weight: "54"
           default: "enable"
           dscp_forward: "enable"
           dscp_forward_tag: "<your_own_value>"
           dscp_reverse: "enable"
           dscp_reverse_tag: "<your_own_value>"
           dst:
            - name: "default_name_61 (source firewall.address.name firewall.addrgrp.name)"
           dst_negate: "enable"
           dst6:
            - name: "default_name_64 (source firewall.address6.name firewall.addrgrp6.name)"
           end_port: "65"
           gateway: "enable"
           groups:
            - name: "default_name_68 (source user.group.name)"
           health_check: "<your_own_value> (source system.virtual-wan-link.health-check.name)"
           hold_down_time: "70"
           id: "71"
           input_device:
            - name: "default_name_73 (source system.interface.name)"
           internet_service: "enable"
           internet_service_ctrl:
            - id: "76"
           internet_service_ctrl_group:
            - name: "default_name_78 (source application.group.name)"
           internet_service_custom:
            - name: "default_name_80 (source firewall.internet-service-custom.name)"
           internet_service_custom_group:
            - name: "default_name_82 (source firewall.internet-service-custom-group.name)"
           internet_service_group:
            - name: "default_name_84 (source firewall.internet-service-group.name)"
           internet_service_id:
            - id: "86 (source firewall.internet-service.id)"
           jitter_weight: "87"
           latency_weight: "88"
           link_cost_factor: "latency"
           link_cost_threshold: "90"
           member: "91"
           mode: "auto"
           name: "default_name_93"
           packet_loss_weight: "94"
           priority_members:
            - seq_num: "96 (source system.virtual-wan-link.members.seq-num)"
           protocol: "97"
           quality_link: "98"
           route_tag: "99"
           sla:
            - health_check: "<your_own_value> (source system.virtual-wan-link.health-check.name)"
              id: "102"
           src:
            - name: "default_name_104 (source firewall.address.name firewall.addrgrp.name)"
           src_negate: "enable"
           src6:
            - name: "default_name_107 (source firewall.address6.name firewall.addrgrp6.name)"
           start_port: "108"
           status: "enable"
           tos: "<your_own_value>"
           tos_mask: "<your_own_value>"
           users:
            - name: "default_name_113 (source user.local.name)"
        status: "disable"
'''
RETURN = '''
build:
  description: Build number of the fortigate image
  returned: always
  type: str
  sample: '1547'
http_method:
  description: Last method used to provision the content into FortiGate
  returned: always
  type: str
  sample: 'PUT'
http_status:
  description: Last result given by FortiGate on last operation applied
  returned: always
  type: str
  sample: "200"
mkey:
  description: Master key (id) used in the last call to FortiGate
  returned: success
  type: str
  sample: "id"
name:
  description: Name of the table used to fulfill the request
  returned: always
  type: str
  sample: "urlfilter"
path:
  description: Path of the table used to fulfill the request
  returned: always
  type: str
  sample: "webfilter"
revision:
  description: Internal revision number
  returned: always
  type: str
  sample: "17.0.2.10658"
serial:
  description: Serial number of the unit
  returned: always
  type: str
  sample: "FGVMEVYYQT3AB5352"
status:
  description: Indication of the operation's result
  returned: always
  type: str
  sample: "success"
vdom:
  description: Virtual domain used
  returned: always
  type: str
  sample: "root"
version:
  description: Version of the FortiGate
  returned: always
  type: str
  sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG


def login(data, fos):
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)
def filter_system_virtual_wan_link_data(json):
    option_list = ['fail_alert_interfaces', 'fail_detect', 'health_check',
                   'load_balance_mode', 'members', 'service',
                   'status']
    dictionary = {}

    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]

    return dictionary
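# Illustrative sketch (not part of the original module): filter_system_virtual_wan_link_data
# keeps only the keys named in option_list and drops anything left at None, so optional
# parameters the playbook never set are not sent to the FortiGate API. The sample values
# and the helper name below are hypothetical.
def _example_filter_usage():
    params = {'status': 'enable',
              'load_balance_mode': None,   # unset option: dropped
              'vdom': 'root'}              # not in option_list: dropped
    # Expected result: {'status': 'enable'}
    return filter_system_virtual_wan_link_data(params)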
def underscore_to_hyphen(data):
    if isinstance(data, list):
        for elem in data:
            elem = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data
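# Illustrative sketch (not part of the original module): underscore_to_hyphen rewrites the
# Ansible-style underscore keys into the hyphenated attribute names the FortiOS CMDB API
# expects. The sample dictionary and helper name are hypothetical.
def _example_underscore_to_hyphen():
    sample = {'fail_detect': 'enable', 'load_balance_mode': 'source-ip-based'}
    # Expected result: {'fail-detect': 'enable', 'load-balance-mode': 'source-ip-based'}
    return underscore_to_hyphen(sample)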
def system_virtual_wan_link(data, fos):
    vdom = data['vdom']
    system_virtual_wan_link_data = data['system_virtual_wan_link']
    filtered_data = underscore_to_hyphen(filter_system_virtual_wan_link_data(system_virtual_wan_link_data))

    return fos.set('system',
                   'virtual-wan-link',
                   data=filtered_data,
                   vdom=vdom)


def is_successful_status(status):
    return status['status'] == "success" or \
        status['http_method'] == "DELETE" and status['http_status'] == 404


def fortios_system(data, fos):

    if data['system_virtual_wan_link']:
        resp = system_virtual_wan_link(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
type: str choices: - disable -", "internet_service_id: - id: \"86 (source firewall.internet-service.id)\" jitter_weight: \"87\" latency_weight: \"88\"", "\"jitter_weight\": {\"required\": False, \"type\": \"int\"}, \"latency_weight\": {\"required\": False, \"type\": \"int\"},", "seq_num: \"14 (source system.virtual-wan-link.members.seq-num)\" name: \"default_name_15\" packet_size: \"16\" password: \"<<PASSWORD>>\"", "type: str fail_detect: description: - Enable/disable SD-WAN Internet connection status", "firewall.address6.name firewall.addrgrp6.name)\" start_port: \"108\" status: \"enable\" tos: \"<your_own_value>\" tos_mask: \"<your_own_value>\"", "\"health_check\": {\"required\": False, \"type\": \"str\"}, \"hold_down_time\": {\"required\": False, \"type\": \"int\"},", "a virtual instance of the FortiGate that can be configured", "redundant internet connections using SD-WAN (formerly virtual WAN link). fortios_system_virtual_wan_link:", "(@mamunozgonzalez) - <NAME> (@thomnico) notes: - Requires fortiosapi library developed", "for SD-WAN rules or priority rules. type: int seq_num: description:", "\"root\"}, \"https\": {\"required\": False, \"type\": \"bool\", \"default\": True}, \"ssl_verify\": {\"required\":", "description: - Internet service ID. Source firewall.internet-service.id. required: true type:", "- enable ''' EXAMPLES = ''' - hosts: localhost vars:", "communicate with it. type: list suboptions: addr_mode: description: - Address", "\"str\"}, \"username\": {\"required\": False, \"type\": \"str\"}, \"password\": {\"required\": False, \"type\":", "True, \"type\": \"str\"} }}, \"end_port\": {\"required\": False, \"type\": \"int\"}, \"gateway\":", "- Port number used to communicate with the server over", "description: - Enable/disable this interface in the SD-WAN. type: str", "{\"required\": False, \"type\": \"str\"}, \"gateway\": {\"required\": False, \"type\": \"str\"}, \"gateway6\":", "and used as a different unit. type: str default: root", "\"FGVMEVYYQT3AB5352\" status: description: Indication of the operation's result returned: always", "True, \"type\": \"int\"}, \"jitter_threshold\": {\"required\": False, \"type\": \"int\"}, \"latency_threshold\": {\"required\":", "- Packet loss for SLA to make decision in percentage.", "disable tos: description: - Type of service bit pattern. type:", "\"options\": { \"name\": {\"required\": True, \"type\": \"str\"} }} }}, \"status\":", "int jitter_weight: description: - Coefficient of jitter in the formula", "'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_system_virtual_wan_link short_description: Configure", "{\"required\": False, \"type\": \"str\", \"default\": \"root\"}, \"https\": {\"required\": False, \"type\":", "str status: description: - Enable/disable SD-WAN. type: str choices: -", "\"<<PASSWORD>>\" port: \"18\" protocol: \"ping\" recoverytime: \"20\" security_mode: \"none\" server:", "responses received before server is considered recovered (1 - 3600).", "Fortinet's FortiOS and FortiGate. description: - This module is able", "mode. type: str choices: - none - authentication server: description:", "dict): new_data = {} for k, v in data.items(): new_data[k.replace('_',", "\"type\": \"list\", \"options\": { \"id\": {\"required\": True, \"type\": \"int\"}, \"jitter_threshold\":", "- Link cost factor. 
type: str choices: - latency -", "'-')] = underscore_to_hyphen(v) data = new_data return data def system_virtual_wan_link(data,", "- packet-loss - inbandwidth - outbandwidth - bibandwidth - custom-profile-1", "modify # it under the terms of the GNU General", "among those defined previously. A vdom is a virtual instance", "type: str internet_service_group: description: - Internet Service group list. type:", "\"default\": \"\", \"no_log\": True}, \"vdom\": {\"required\": False, \"type\": \"str\", \"default\":", "reached, new sessions spill over to other interfaces in the", "description: Path of the table used to fulfill the request", "modify system feature and virtual_wan_link category. Examples include all parameters", "measured-volume-based members: description: - Physical FortiGate interfaces added to the", "\"int\"}, \"threshold_warning_latency\": {\"required\": False, \"type\": \"int\"}, \"threshold_warning_packetloss\": {\"required\": False, \"type\":", "application.group.name)\" internet_service_custom: - name: \"default_name_80 (source firewall.internet-service-custom.name)\" internet_service_custom_group: - name:", "\"default_name_15\" packet_size: \"16\" password: \"<<PASSWORD>>\" port: \"18\" protocol: \"ping\" recoverytime:", "\"int\"}, \"quality_link\": {\"required\": False, \"type\": \"int\"}, \"route_tag\": {\"required\": False, \"type\":", "str gateway6: description: - IPv6 gateway. type: str ingress_spillover_threshold: description:", "internet_service_ctrl: description: - Control-based Internet Service ID list. type: list", "- ping6 recoverytime: description: - Number of successful responses received", "\"name\": {\"required\": True, \"type\": \"str\"} }} }}, \"status\": {\"required\": False,", "- comment: \"Comments.\" gateway: \"<your_own_value>\" gateway6: \"<your_own_value>\" ingress_spillover_threshold: \"42\" interface:", "import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username =", "[\"disable\", \"enable\"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) #", "\"int\"}, \"interface\": {\"required\": False, \"type\": \"str\"}, \"priority\": {\"required\": False, \"type\":", "{\"required\": False, \"type\": \"int\"}, \"status\": {\"required\": False, \"type\": \"str\", \"choices\":", "description: - Control-based Internet Service ID. required: true type: int", "Version of the FortiGate returned: always type: str sample: \"v5.6.3\"", "bits. type: str users: description: - User name. type: list", "regeneration (0 - 10000000). type: int member: description: - Member", "Control how the priority rule sets the priority of interfaces", "- manual - priority - sla name: description: - Priority", "\"default\": True}, \"system_virtual_wan_link\": { \"required\": False, \"type\": \"dict\", \"default\": None,", "security mode. type: str choices: - none - authentication server:", "control how sessions are distributed to physical interfaces in the", "{ \"id\": {\"required\": True, \"type\": \"int\"} }}, \"jitter_weight\": {\"required\": False,", "password: description: - FortiOS or FortiGate password. type: str default:", "int threshold_warning_jitter: description: - Warning threshold for jitter (ms). type:", "if 'https' in data and not data['https']: fos.https('off') else: fos.https('on')", "str sample: \"200\" mkey: description: Master key (id) used in", "user.group.name. 
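            # Illustrative sketch only, not part of the option schema: one hypothetical
            # health_check entry as it could be supplied to this module from a playbook.
            # The server address, thresholds, and the "ping-isp1" name are placeholders
            # invented for this example, not defaults shipped with the module.
            #
            #   health_check:
            #     - name: "ping-isp1"
            #       server: "192.168.100.40"
            #       protocol: "ping"
            #       interval: 5
            #       recoverytime: 5
            #       members:
            #         - seq_num: 1
            #       sla:
            #         - id: 1
            #           latency_threshold: 250
            #           jitter_threshold: 50
            #           packetloss_threshold: 2
            #           link_cost_factor: "latency"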
            load_balance_mode:
                description:
                    - Algorithm or mode to use for load balancing Internet traffic to SD-WAN members.
                type: str
                choices:
                    - source-ip-based
                    - weight-based
                    - usage-based
                    - source-dest-ip-based
                    - measured-volume-based
            members:
                description:
                    - Physical FortiGate interfaces added to the virtual-wan-link.
                type: list
                suboptions:
                    comment:
                        description:
                            - Comments.
                        type: str
                    gateway:
                        description:
                            - The default gateway for this interface. Usually the default gateway of the Internet
                              service provider that this interface is connected to.
                        type: str
                    gateway6:
                        description:
                            - IPv6 gateway.
                        type: str
                    ingress_spillover_threshold:
                        description:
                            - Ingress spillover threshold for this interface (0 - 16776000 kbit/s). When this traffic
                              volume threshold is reached, new sessions spill over to other interfaces in the SD-WAN.
                        type: int
                    interface:
                        description:
                            - Interface name. Source system.interface.name.
                        type: str
                    priority:
                        description:
                            - Priority of the interface (0 - 4294967295). Used for SD-WAN rules or priority rules.
                        type: int
                    seq_num:
                        description:
                            - Sequence number(1-255).
                        type: int
                    source:
                        description:
                            - Source IP address used in the health-check packet to the server.
                        type: str
                    source6:
                        description:
                            - Source IPv6 address used in the health-check packet to the server.
                        type: str
                    spillover_threshold:
                        description:
                            - Egress spillover threshold for this interface (0 - 16776000 kbit/s). When this traffic
                              volume threshold is reached, new sessions spill over to other interfaces in the SD-WAN.
                        type: int
                    status:
                        description:
                            - Enable/disable this interface in the SD-WAN.
                        type: str
                        choices:
                            - enable
                            - disable
                    volume_ratio:
                        description:
                            - Measured volume ratio (this value / sum of all values = percentage of link volume, 0 - 255).
                        type: int
                    weight:
                        description:
                            - Weight of this interface for weighted load balancing. (0 - 255) More traffic is directed
                              to interfaces with higher weights.
                        type: int
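            # Illustrative sketch only: a hypothetical members list describing two WAN
            # interfaces. The interface names, gateways, and weights below are placeholders
            # chosen for the example, not values taken from any device.
            #
            #   members:
            #     - seq_num: 1
            #       interface: "wan1"
            #       gateway: "192.0.2.1"
            #       weight: 10
            #       status: "enable"
            #     - seq_num: 2
            #       interface: "wan2"
            #       gateway: "198.51.100.1"
            #       weight: 5
            #       status: "enable"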
            status:
                description:
                    - Enable/disable SD-WAN.
                type: str
                choices:
                    - disable
                    - enable
            service:
                description:
                    - Create SD-WAN rules or priority rules (also called services) to control how sessions are
                      distributed to physical interfaces in the SD-WAN.
                type: list
                suboptions:
                    dscp_reverse_tag:
                        description:
                            - Reverse traffic DSCP tag.
                        type: str
                    dst_negate:
                        description:
                            - Enable/disable negation of destination address match.
                        type: str
                        choices:
                            - enable
                            - disable
                    end_port:
                        description:
                            - End destination port number.
                        type: int
                    groups:
                        description:
                            - User groups.
                        type: list
                        suboptions:
                            name:
                                description:
                                    - Group name. Source user.group.name.
                                required: true
                                type: str
                    health_check:
                        description:
                            - Health check. Source system.virtual-wan-link.health-check.name.
                        type: str
                    hold_down_time:
                        description:
                            - Waiting period in seconds when switching from the back-up member to the primary member
                              (0 - 10000000).
                        type: int
                    id:
                        description:
                            - Priority rule ID (1 - 4000).
                        required: true
                        type: int
                    input_device:
                        description:
                            - Source interface name.
                        type: list
                        suboptions:
                            name:
                                description:
                                    - Interface name. Source system.interface.name.
                                required: true
                                type: str
                    internet_service_custom:
                        description:
                            - Custom Internet service name list.
                        type: list
                        suboptions:
                            name:
                                description:
                                    - Custom Internet service name. Source firewall.internet-service-custom.name.
                                required: true
                                type: str
                    jitter_weight:
                        description:
                            - Coefficient of jitter in the formula of custom-profile-1.
                        type: int
                    latency_weight:
                        description:
                            - Coefficient of latency in the formula of custom-profile-1.
                        type: int
                    link_cost_factor:
                        description:
                            - Criteria on which to base link selection.
                        type: str
                        choices:
                            - latency
                            - jitter
                            - packet-loss
                            - inbandwidth
                            - outbandwidth
                            - bibandwidth
                            - custom-profile-1
                    link_cost_threshold:
                        description:
                            - Percentage threshold change of link cost values that will result in policy route
                              regeneration (0 - 10000000).
                        type: int
                    mode:
                        description:
                            - Control how the priority rule sets the priority of interfaces in the SD-WAN.
                        type: str
                        choices:
                            - auto
                            - manual
                            - priority
                            - sla
                    name:
                        description:
                            - Priority rule name.
                        type: str
                    packet_loss_weight:
                        description:
                            - Coefficient of packet-loss in the formula of custom-profile-1.
                        type: int
                    priority_members:
                        description:
                            - Member sequence number list.
                        type: list
                        suboptions:
                            seq_num:
                                description:
                                    - Member sequence number. Source system.virtual-wan-link.members.seq-num.
                                type: int
                    quality_link:
                        description:
                            - Quality grade.
                        type: int
                    route_tag:
                        description:
                            - IPv4 route map route-tag.
                        type: int
                    sla:
                        description:
                            - Service level agreement (SLA).
                        type: list
                        suboptions:
                            health_check:
                                description:
                                    - Virtual WAN Link health-check. Source system.virtual-wan-link.health-check.name.
                                type: str
                            id:
                                description:
                                    - SLA ID.
                                type: int
                    status:
                        description:
                            - Enable/disable SD-WAN service.
                        type: str
                        choices:
                            - enable
                            - disable
                    tos_mask:
                        description:
                            - Type of service evaluated bits.
                        type: str
                    users:
                        description:
                            - User name.
                        type: list
                        suboptions:
                            name:
                                description:
                                    - User name. Source user.local.name.
                                required: true
                                type: str
type: str http_match: description: -", "'service', 'status'] dictionary = {} for attribute in option_list: if", "{\"required\": False, \"type\": \"str\"}, \"username\": {\"required\": False, \"type\": \"str\"}, \"password\":", "Ingress spillover threshold for this interface (0 - 16776000 kbit/s).", "\"int\"}, \"threshold_alert_packetloss\": {\"required\": False, \"type\": \"int\"}, \"threshold_warning_jitter\": {\"required\": False, \"type\":", "SD-WAN. type: str choices: - disable - enable volume_ratio: description:", "str dscp_reverse: description: - Enable/disable reverse traffic DSCP tag. type:", "the server over the selected protocol. type: int protocol: description:", "available bidirectional bandwidth in the formula of custom-profile-1. type: int", "Virtual WAN Link health-check. Source system.virtual-wan-link.health-check.name. type: str id: description:", "WAN link). default: null type: dict suboptions: fail_alert_interfaces: description: -", "str ingress_spillover_threshold: description: - Ingress spillover threshold for this interface", "'fail_detect', 'health_check', 'load_balance_mode', 'members', 'service', 'status'] dictionary = {} for", "under the terms of the GNU General Public License as", "- name: \"default_name_80 (source firewall.internet-service-custom.name)\" internet_service_custom_group: - name: \"default_name_82 (source", "Enable/disable this interface in the SD-WAN. type: str choices: -", "Alert threshold for jitter (ms). type: int threshold_alert_latency: description: -", "distributed to physical interfaces in the SD-WAN. type: list suboptions:", "\"str\"} }}, \"internet_service\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]},", "(source user.group.name)\" health_check: \"<your_own_value> (source system.virtual-wan-link.health-check.name)\" hold_down_time: \"70\" id: \"71\"", "service: description: - Create SD-WAN rules or priority rules (also", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "interface: \"<your_own_value> (source system.interface.name)\" priority: \"44\" seq_num: \"45\" source: \"<your_own_value>\"", "\"type\": \"str\", \"choices\": [\"disable\", \"enable\"]} } } } module =", "of the Internet service provider that this interface is connected", "{\"required\": False, \"type\": \"bool\", \"default\": True}, \"ssl_verify\": {\"required\": False, \"type\":", "string expected from the server if the protocol is HTTP.", "False, \"type\": \"str\"}, \"gateway\": {\"required\": False, \"type\": \"str\"}, \"gateway6\": {\"required\":", "always type: str sample: \"200\" mkey: description: Master key (id)", "Twamp controller password in authentication mode type: str port: description:", "\"link_cost_factor\": {\"required\": False, \"type\": \"str\", \"choices\": [\"latency\", \"jitter\", \"packet-loss\"]}, \"packetloss_threshold\":", "{ \"name\": {\"required\": True, \"type\": \"str\"} }}, \"dst_negate\": {\"required\": False,", "\"type\": \"str\", \"choices\": [\"enable\", \"disable\"]} }}, \"load_balance_mode\": {\"required\": False, \"type\":", "FortiGate IP address. type: str required: false username: description: -", "username. type: str required: false password: description: - FortiOS or", "False, \"type\": \"str\"}, \"source6\": {\"required\": False, \"type\": \"str\"}, \"spillover_threshold\": {\"required\":", "Response string expected from the server if the protocol is", "firewall.address.name firewall.addrgrp.name. 
required: true type: str src_negate: description: - Enable/disable", "jitter (ms). type: int threshold_warning_latency: description: - Warning threshold for", "check or health check name. required: true type: str packet_size:", "- outbandwidth - bibandwidth - custom-profile-1 link_cost_threshold: description: - Percentage", "threshold_alert_jitter: \"29\" threshold_alert_latency: \"30\" threshold_alert_packetloss: \"31\" threshold_warning_jitter: \"32\" threshold_warning_latency: \"33\"", "Control-based Internet Service group name. Source application.group.name. required: true type:", "dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data:", "\"name\": {\"required\": True, \"type\": \"str\"} }}, \"fail_detect\": {\"required\": False, \"type\":", "playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or", "\"{{ password }}\" vdom: \"{{ vdom }}\" https: \"False\" system_virtual_wan_link:", "int name: description: - Status check or health check name.", "- disable dscp_forward: description: - Enable/disable forward traffic DSCP tag.", "3600 sec). type: int members: description: - Member sequence number", "traffic DSCP tag. type: str choices: - enable - disable", "- none - authentication server: description: - IP address or", "description: Last method used to provision the content into FortiGate", "the requests towards FortiGate must use HTTPS protocol. type: bool", "\"int\"}, \"threshold_alert_latency\": {\"required\": False, \"type\": \"int\"}, \"threshold_alert_packetloss\": {\"required\": False, \"type\":", "required: true type: int internet_service_ctrl_group: description: - Control-based Internet Service", "volume threshold is reached, new sessions spill over to other", "\"tos\": {\"required\": False, \"type\": \"str\"}, \"tos_mask\": {\"required\": False, \"type\": \"str\"},", "\"type\": \"int\"}, \"interface\": {\"required\": False, \"type\": \"str\"}, \"priority\": {\"required\": False,", "type: int protocol: description: - Protocol number. type: int quality_link:", "type: list suboptions: id: description: - Internet service ID. Source", "str choices: - enable - disable internet_service_ctrl: description: - Control-based", "\"type\": \"str\"}, \"interval\": {\"required\": False, \"type\": \"int\"}, \"members\": {\"required\": False,", "int protocol: description: - Protocol used to determine if the", "\"int\"}, \"sla\": {\"required\": False, \"type\": \"list\", \"options\": { \"health_check\": {\"required\":", "A PARTICULAR PURPOSE. See the # GNU General Public License", "the Free Software Foundation, either version 3 of the License,", "Control-based Internet Service group list. type: list suboptions: name: description:", "the FortiGate can communicate with the server. type: str choices:", "the interface (0 - 4294967295). Used for SD-WAN rules or", "list. type: list suboptions: name: description: - Custom Internet service", "\"str\"} }}, \"dst_negate\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]},", "(ms). 
type: int threshold_alert_latency: description: - Alert threshold for latency", "type: str internet_service: description: - Enable/disable use of Internet service", "= Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_system(module.params,", "free software: you can redistribute it and/or modify # it", "(source system.virtual-wan-link.members.seq-num)\" protocol: \"97\" quality_link: \"98\" route_tag: \"99\" sla: -", "False, \"type\": \"str\", \"choices\": [\"ping\", \"tcp-echo\", \"udp-echo\", \"http\", \"twamp\", \"ping6\"]},", "sla name: description: - Priority rule name. type: str packet_loss_weight:", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "returned: always type: str sample: 'PUT' http_status: description: Last result", "input_device: description: - Source interface name. type: list suboptions: name:", "interval: description: - Status check interval, or the time between", "description: - IPv6 gateway. type: str ingress_spillover_threshold: description: - Ingress", "- FortiOS or FortiGate password. type: str default: \"\" vdom:", "returned: always type: str sample: '1547' http_method: description: Last method", "username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on')", "Source firewall.internet-service-group.name. required: true type: str internet_service_id: description: - Internet", "\"host\": {\"required\": False, \"type\": \"str\"}, \"username\": {\"required\": False, \"type\": \"str\"},", "str dst: description: - Destination address name. type: list suboptions:", "the fortigate image returned: always type: str sample: '1547' http_method:", "recovered (1 - 3600). type: int security_mode: description: - Twamp", "{ \"name\": {\"required\": True, \"type\": \"str\"} }}, \"internet_service_group\": {\"required\": False,", "defined previously. A vdom is a virtual instance of the", "hold_down_time: description: - Waiting period in seconds when switching from", "if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg=\"Error in repo\", meta=result)", "tos: description: - Type of service bit pattern. type: str", "False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"dscp_reverse_tag\": {\"required\": False, \"type\":", "\"str\"} }}, \"internet_service_id\": {\"required\": False, \"type\": \"list\", \"options\": { \"id\":", "module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result", "{\"required\": False, \"type\": \"str\", \"choices\": [\"disable\", \"enable\"]}, \"volume_ratio\": {\"required\": False,", "for jitter (ms). type: int threshold_warning_latency: description: - Warning threshold", "\"dst_negate\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"dst6\": {\"required\":", "\"enable\" dscp_forward_tag: \"<your_own_value>\" dscp_reverse: \"enable\" dscp_reverse_tag: \"<your_own_value>\" dst: - name:", "\"options\": { \"name\": {\"required\": True, \"type\": \"str\"} }}, \"dst_negate\": {\"required\":", "id: \"76\" internet_service_ctrl_group: - name: \"default_name_78 (source application.group.name)\" internet_service_custom: -", "legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error,", "system.interface.name)\" fail_detect: \"enable\" health_check: - addr_mode: \"ipv4\" failtime: \"8\" http_agent:", "Service group list. 
type: list suboptions: name: description: - Custom", "description: - Group name. Source user.group.name. required: true type: str", "\"ping6\"]}, \"recoverytime\": {\"required\": False, \"type\": \"int\"}, \"security_mode\": {\"required\": False, \"type\":", "with the server. type: str choices: - ping - tcp-echo", "module is required\") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed,", "src_negate: description: - Enable/disable negation of source address match. type:", "{\"required\": False, \"type\": \"int\"}, \"update_cascade_interface\": {\"required\": False, \"type\": \"str\", \"choices\":", "server. type: str spillover_threshold: description: - Egress spillover threshold for", "lost (1 - 3600). type: int http_agent: description: - String", "- id: \"86 (source firewall.internet-service.id)\" jitter_weight: \"87\" latency_weight: \"88\" link_cost_factor:", "== \"success\" or \\ status['http_method'] == \"DELETE\" and status['http_status'] ==", "member to the primary member (0 - 10000000). type: int", "\"password\": {\"required\": False, \"type\": \"str\", \"default\": \"\", \"no_log\": True}, \"vdom\":", "\"id\": {\"required\": False, \"type\": \"int\"} }}, \"src\": {\"required\": False, \"type\":", "enable - disable dscp_forward_tag: description: - Forward traffic DSCP tag.", "is HTTP. type: str http_match: description: - Response string expected", "\"str\"}, \"sla\": {\"required\": False, \"type\": \"list\", \"options\": { \"id\": {\"required\":", "IPv6). type: str choices: - ipv4 - ipv6 failtime: description:", "}}, \"threshold_alert_jitter\": {\"required\": False, \"type\": \"int\"}, \"threshold_alert_latency\": {\"required\": False, \"type\":", "true ssl_verify: description: - Ensures FortiGate certificate must be verified", "- Source address name. type: list suboptions: name: description: -", "custom-profile-1 link_cost_threshold: description: - Percentage threshold change of link cost", "\"list\", \"options\": { \"seq_num\": {\"required\": False, \"type\": \"int\"} }}, \"name\":", "is considered recovered (1 - 3600). type: int security_mode: description:", "or FQDN name of the server. type: str sla: description:", "internet connections using SD-WAN (formerly virtual WAN link). default: null", "list suboptions: name: description: - Physical interface name. Source system.interface.name.", "\"jitter\", \"packet-loss\", \"inbandwidth\", \"outbandwidth\", \"bibandwidth\", \"custom-profile-1\"]}, \"link_cost_threshold\": {\"required\": False, \"type\":", "domain, among those defined previously. A vdom is a virtual", "tag. type: str choices: - enable - disable dscp_forward_tag: description:", "custom-profile-1. type: int link_cost_factor: description: - Link cost factor. type:", "False, \"type\": \"int\"}, \"interface\": {\"required\": False, \"type\": \"str\"}, \"priority\": {\"required\":", "disable dscp_forward: description: - Enable/disable forward traffic DSCP tag. 
type:", "[\"source-ip-based\", \"weight-based\", \"usage-based\", \"source-dest-ip-based\", \"measured-volume-based\"]}, \"members\": {\"required\": False, \"type\": \"list\",", "False, \"type\": \"list\", \"options\": { \"comment\": {\"required\": False, \"type\": \"str\"},", "data['vdom'] system_virtual_wan_link_data = data['system_virtual_wan_link'] filtered_data = underscore_to_hyphen(filter_system_virtual_wan_link_data(system_virtual_wan_link_data)) return fos.set('system', 'virtual-wan-link',", "[\"enable\", \"disable\"]}, \"update_static_route\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}", "type: str dst_negate: description: - Enable/disable negation of destination address", "hosts: localhost vars: host: \"192.168.122.40\" username: \"admin\" password: \"\" vdom:", "description: - FortiOS or FortiGate IP address. type: str required:", "\"type\": \"int\"}, \"seq_num\": {\"required\": False, \"type\": \"int\"}, \"source\": {\"required\": False,", "\"type\": \"str\"}, \"tos_mask\": {\"required\": False, \"type\": \"str\"}, \"users\": {\"required\": False,", "{ \"name\": {\"required\": True, \"type\": \"str\"} }} }}, \"status\": {\"required\":", "not is_successful_status(resp), \\ resp['status'] == \"success\", \\ resp def main():", "\"packet_loss_weight\": {\"required\": False, \"type\": \"int\"}, \"priority_members\": {\"required\": False, \"type\": \"list\",", "jitter - packet-loss - inbandwidth - outbandwidth - bibandwidth -", "\"source6\": {\"required\": False, \"type\": \"str\"}, \"spillover_threshold\": {\"required\": False, \"type\": \"int\"},", "- disable - enable volume_ratio: description: - Measured volume ratio", "group name. Source firewall.internet-service-custom-group.name. required: true type: str internet_service_group: description:", "}}, \"internet_service_custom_group\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\": {\"required\":", "str interval: description: - Status check interval, or the time", "not None and \\ 'username' in module.params and module.params['username'] is", "session, type: int password: description: - Twamp controller password in", "\"int\"}, \"weight\": {\"required\": False, \"type\": \"int\"} }}, \"service\": {\"required\": False,", "\"26\" link_cost_factor: \"latency\" packetloss_threshold: \"28\" threshold_alert_jitter: \"29\" threshold_alert_latency: \"30\" threshold_alert_packetloss:", "{\"required\": False, \"type\": \"int\"}, \"source\": {\"required\": False, \"type\": \"str\"}, \"source6\":", "\"choices\": [\"enable\", \"disable\"]}, \"src6\": {\"required\": False, \"type\": \"list\", \"options\": {", "- Enable/disable SD-WAN Internet connection status checking (failure detection). type:", "True, \"type\": \"str\"} }}, \"internet_service\": {\"required\": False, \"type\": \"str\", \"choices\":", "all values = percentage of link volume, 0 - 255).", "connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result =", "\"seq_num\": {\"required\": False, \"type\": \"int\"} }}, \"protocol\": {\"required\": False, \"type\":", "to control how sessions are distributed to physical interfaces in", "ipv4 - ipv6 bandwidth_weight: description: - Coefficient of reciprocal of", "any later version. 
# # This program is distributed in", "'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_system_virtual_wan_link", "type: str choices: - ping - tcp-echo - udp-echo -", "\"http_agent\": {\"required\": False, \"type\": \"str\"}, \"http_get\": {\"required\": False, \"type\": \"str\"},", "bit pattern. type: str tos_mask: description: - Type of service", "the SD-WAN. type: int status: description: - Enable/disable this interface", "link). fortios_system_virtual_wan_link: host: \"{{ host }}\" username: \"{{ username }}\"", "communicate with the server over the selected protocol. type: int", "\"threshold_warning_jitter\": {\"required\": False, \"type\": \"int\"}, \"threshold_warning_latency\": {\"required\": False, \"type\": \"int\"},", "of all values = percentage of link volume, 0 -", "{\"required\": False, \"type\": \"str\"}, \"hold_down_time\": {\"required\": False, \"type\": \"int\"}, \"id\":", "(FOS) device by allowing the user to set and modify", "- Physical interface name. Source system.interface.name. required: true type: str", "https: description: - Indicates if the requests towards FortiGate must", "services) to control how sessions are distributed to physical interfaces", "- Status check interval, or the time between attempting to", "= new_data return data def system_virtual_wan_link(data, fos): vdom = data['vdom']", "type: str choices: - enable - disable health_check: description: -", "}}\" vdom: \"{{ vdom }}\" https: \"False\" system_virtual_wan_link: fail_alert_interfaces: -", "True, \"type\": \"int\"} }}, \"jitter_weight\": {\"required\": False, \"type\": \"int\"}, \"latency_weight\":", "required: true type: str src_negate: description: - Enable/disable negation of", "FortiOS (FOS) device by allowing the user to set and", "Service group list. type: list suboptions: name: description: - Control-based", "priority of interfaces in the SD-WAN. type: str choices: -", "License as published by # the Free Software Foundation, either", "threshold_warning_jitter: description: - Warning threshold for jitter (ms). type: int", "the FortiGate that can be configured and used as a", "provision the content into FortiGate returned: always type: str sample:", "= fortios_system(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import", "None, \"options\": { \"fail_alert_interfaces\": {\"required\": False, \"type\": \"list\", \"options\": {", "the formula of custom-profile-1. type: int priority_members: description: - Member", "[\"enable\", \"disable\"]}, \"dscp_forward_tag\": {\"required\": False, \"type\": \"str\"}, \"dscp_reverse\": {\"required\": False,", "''' build: description: Build number of the fortigate image returned:", "description: - Type of service bit pattern. type: str tos_mask:", "addr_mode: description: - Address mode (IPv4 or IPv6). type: str", "type: int jitter_weight: description: - Coefficient of jitter in the", "Reverse traffic DSCP tag. type: str dst: description: - Destination", "dscp_reverse: \"enable\" dscp_reverse_tag: \"<your_own_value>\" dst: - name: \"default_name_61 (source firewall.address.name", "str hold_down_time: description: - Waiting period in seconds when switching", "\"udp-echo\", \"http\", \"twamp\", \"ping6\"]}, \"recoverytime\": {\"required\": False, \"type\": \"int\"}, \"security_mode\":", "priority rules. type: int seq_num: description: - Sequence number(1-255). type:", "type: str sample: 'PUT' http_status: description: Last result given by", "- Member sequence number. 
Source system.virtual-wan-link.members.seq-num. type: int name: description:", "on which to base link selection. type: str choices: -", "link cost values that will result in policy route regeneration", "\"list\", \"options\": { \"seq_num\": {\"required\": False, \"type\": \"int\"} }}, \"protocol\":", "sample: \"200\" mkey: description: Master key (id) used in the", "the SD-WAN. type: int interface: description: - Interface name. Source", "\"disable\"]}, \"tos\": {\"required\": False, \"type\": \"str\"}, \"tos_mask\": {\"required\": False, \"type\":", "more details. # # You should have received a copy", "- disable src6: description: - Source address6 name. type: list", "status checking (failure detection). type: str choices: - enable -", "\"threshold_warning_packetloss\": {\"required\": False, \"type\": \"int\"}, \"update_cascade_interface\": {\"required\": False, \"type\": \"str\",", "required\") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result =", "- latency - jitter - packet-loss packetloss_threshold: description: - Packet", "latency - jitter - packet-loss packetloss_threshold: description: - Packet loss", "of service bit pattern. type: str tos_mask: description: - Type", "be useful, # but WITHOUT ANY WARRANTY; without even the", "not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path)", "DSCP tag. type: str dst: description: - Destination address name.", "\"type\": \"str\"}, \"dscp_reverse\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]},", "\"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"dscp_reverse_tag\": {\"required\": False, \"type\": \"str\"},", "type: int threshold_warning_packetloss: description: - Warning threshold for packet loss", "new sessions spill over to other interfaces in the SD-WAN.", "recoverytime: \"20\" security_mode: \"none\" server: \"192.168.100.40\" sla: - id: \"24\"", "\"<your_own_value>\" interval: \"12\" members: - seq_num: \"14 (source system.virtual-wan-link.members.seq-num)\" name:", "10000000). type: int latency_threshold: description: - Latency for SLA to", "Address or address group name. Source firewall.address.name firewall.addrgrp.name. required: true", "- String in the http-agent field in the HTTP header.", "{\"required\": False, \"type\": \"str\", \"choices\": [\"ping\", \"tcp-echo\", \"udp-echo\", \"http\", \"twamp\",", "by allowing the user to set and modify system feature", "authentication mode type: str port: description: - Port number used", "enable volume_ratio: description: - Measured volume ratio (this value /", "Service group name. Source firewall.internet-service-group.name. required: true type: str internet_service_id:", "SD-WAN rules or priority rules (also called services) to control", "{\"required\": False, \"type\": \"int\"} }}, \"protocol\": {\"required\": False, \"type\": \"int\"},", "False, \"type\": \"int\"}, \"member\": {\"required\": False, \"type\": \"int\"}, \"mode\": {\"required\":", "or priority rules (also called services) to control how sessions", "dscp_forward: \"enable\" dscp_forward_tag: \"<your_own_value>\" dscp_reverse: \"enable\" dscp_reverse_tag: \"<your_own_value>\" dst: -", "HTTP. type: str interval: description: - Status check interval, or", "str end_port: description: - End destination port number. type: int", "can communicate with it. type: list suboptions: addr_mode: description: -", "description: Build number of the fortigate image returned: always type:", "interface name. 
type: list suboptions: name: description: - Interface name.", "ipv4 - ipv6 failtime: description: - Number of failures before", "will be alerted. type: list suboptions: name: description: - Physical", "true type: str packet_size: description: - Packet size of a", "\"<your_own_value>\" http_get: \"<your_own_value>\" http_match: \"<your_own_value>\" interval: \"12\" members: - seq_num:", "\"type\": \"bool\", \"default\": True}, \"ssl_verify\": {\"required\": False, \"type\": \"bool\", \"default\":", "filtered_data = underscore_to_hyphen(filter_system_virtual_wan_link_data(system_virtual_wan_link_data)) return fos.set('system', 'virtual-wan-link', data=filtered_data, vdom=vdom) def is_successful_status(status):", "priority: \"44\" seq_num: \"45\" source: \"<your_own_value>\" source6: \"<your_own_value>\" spillover_threshold: \"48\"", "def system_virtual_wan_link(data, fos): vdom = data['vdom'] system_virtual_wan_link_data = data['system_virtual_wan_link'] filtered_data", "\"28\" threshold_alert_jitter: \"29\" threshold_alert_latency: \"30\" threshold_alert_packetloss: \"31\" threshold_warning_jitter: \"32\" threshold_warning_latency:", "Latency for SLA to make decision in milliseconds. (0 -", "the unit returned: always type: str sample: \"FGVMEVYYQT3AB5352\" status: description:", "\"list\", \"options\": { \"addr_mode\": {\"required\": False, \"type\": \"str\", \"choices\": [\"ipv4\",", "packet_size: description: - Packet size of a twamp test session,", "to configure a FortiGate or FortiOS (FOS) device by allowing", "description: - SLA ID. type: int src: description: - Source", "IP address. type: str required: false username: description: - FortiOS", "destination port number. type: int gateway: description: - Enable/disable SD-WAN", "\"choices\": [\"ipv4\", \"ipv6\"]}, \"bandwidth_weight\": {\"required\": False, \"type\": \"int\"}, \"default\": {\"required\":", "over to other interfaces in the SD-WAN. type: int status:", "(source firewall.internet-service-custom-group.name)\" internet_service_group: - name: \"default_name_84 (source firewall.internet-service-group.name)\" internet_service_id: -", "Source system.interface.name. type: str priority: description: - Priority of the", "used to provision the content into FortiGate returned: always type:", "using SD-WAN (formerly virtual WAN link). fortios_system_virtual_wan_link: host: \"{{ host", "else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg=\"fortiosapi module", "different unit. type: str default: root https: description: - Indicates", "description: - Status check interval, or the time between attempting", "Enable/disable negation of destination address match. type: str choices: -", "\"<your_own_value>\" http_match: \"<your_own_value>\" interval: \"12\" members: - seq_num: \"14 (source", "address match. type: str choices: - enable - disable src6:", "revision: description: Internal revision number returned: always type: str sample:", "\"type\": \"int\"}, \"input_device\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\":", "service. type: str choices: - enable - disable tos: description:", "type: str ingress_spillover_threshold: description: - Ingress spillover threshold for this", "division, print_function) # Copyright 2019 Fortinet, Inc. # # This", "description: - Coefficient of jitter in the formula of custom-profile-1.", "true type: str start_port: description: - Start destination port number.", "- FortiOS or FortiGate IP address. 
type: str required: false", "- tcp-echo - udp-echo - http - twamp - ping6", "type: str src_negate: description: - Enable/disable negation of source address", "or FortiGate IP address. type: str required: false username: description:", "Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def", "name. Source firewall.internet-service-custom-group.name. required: true type: str internet_service_group: description: -", "type: str choices: - enable - disable dscp_forward_tag: description: -", "or FortiOS (FOS) device by allowing the user to set", "firewall.addrgrp.name)\" src_negate: \"enable\" src6: - name: \"default_name_107 (source firewall.address6.name firewall.addrgrp6.name)\"", "''' - hosts: localhost vars: host: \"192.168.122.40\" username: \"admin\" password:", "developed by Fortinet - Run as a local_action in your", "\"int\"} }}, \"service\": {\"required\": False, \"type\": \"list\", \"options\": { \"addr_mode\":", "\"choices\": [\"enable\", \"disable\"]}, \"groups\": {\"required\": False, \"type\": \"list\", \"options\": {", "enable - disable dscp_reverse_tag: description: - Reverse traffic DSCP tag.", "{\"required\": True, \"type\": \"str\"} }}, \"src_negate\": {\"required\": False, \"type\": \"str\",", "how the priority rule sets the priority of interfaces in", "checking (failure detection). type: str choices: - enable - disable", "priority rule sets the priority of interfaces in the SD-WAN.", "in policy route regeneration (0 - 10000000). type: int member:", "of the unit returned: always type: str sample: \"FGVMEVYYQT3AB5352\" status:", "domain used returned: always type: str sample: \"root\" version: description:", "False, \"type\": \"list\", \"options\": { \"id\": {\"required\": True, \"type\": \"int\"},", "\"latency_weight\": {\"required\": False, \"type\": \"int\"}, \"link_cost_factor\": {\"required\": False, \"type\": \"str\",", "- Address or address group name. Source firewall.address.name firewall.addrgrp.name. required:", "gateway. type: str choices: - enable - disable groups: description:", "use of SD-WAN as default service. type: str choices: -", "(source firewall.address.name firewall.addrgrp.name)\" dst_negate: \"enable\" dst6: - name: \"default_name_64 (source", "\"list\", \"options\": { \"health_check\": {\"required\": False, \"type\": \"str\"}, \"id\": {\"required\":", "underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem =", "virtual WAN link). default: null type: dict suboptions: fail_alert_interfaces: description:", "\"dict\", \"default\": None, \"options\": { \"fail_alert_interfaces\": {\"required\": False, \"type\": \"list\",", "in milliseconds. (0 - 10000000). type: int link_cost_factor: description: -", "the GNU General Public License as published by # the", "\"str\", \"choices\": [\"enable\", \"disable\"]}, \"health_check\": {\"required\": False, \"type\": \"list\", \"options\":", "list suboptions: name: description: - Interface name. Source system.interface.name. required:", "virtual WAN link). 
fortios_system_virtual_wan_link: host: \"{{ host }}\" username: \"{{", "== \"success\", \\ resp def main(): fields = { \"host\":", "\"http\", \"twamp\", \"ping6\"]}, \"recoverytime\": {\"required\": False, \"type\": \"int\"}, \"security_mode\": {\"required\":", "- Response string expected from the server if the protocol", "in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return", "\"input_device\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\": {\"required\": True,", "data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and", "\"str\"}, \"gateway\": {\"required\": False, \"type\": \"str\"}, \"gateway6\": {\"required\": False, \"type\":", "\"type\": \"str\", \"choices\": [\"enable\", \"disable\"]}, \"health_check\": {\"required\": False, \"type\": \"list\",", "\"list\", \"options\": { \"name\": {\"required\": True, \"type\": \"str\"} }}, \"end_port\":", "new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')]", "name. Source firewall.internet-service-custom.name. required: true type: str internet_service_custom_group: description: -", "description: - Comments. type: str gateway: description: - The default", "[\"latency\", \"jitter\", \"packet-loss\", \"inbandwidth\", \"outbandwidth\", \"bibandwidth\", \"custom-profile-1\"]}, \"link_cost_threshold\": {\"required\": False,", "(source firewall.address.name firewall.addrgrp.name)\" src_negate: \"enable\" src6: - name: \"default_name_107 (source", "\"type\": \"int\"}, \"route_tag\": {\"required\": False, \"type\": \"int\"}, \"sla\": {\"required\": False,", "Public License # along with this program. If not, see", "False, \"type\": \"int\"}, \"id\": {\"required\": True, \"type\": \"int\"}, \"input_device\": {\"required\":", "False, \"type\": \"str\", \"choices\": [\"latency\", \"jitter\", \"packet-loss\", \"inbandwidth\", \"outbandwidth\", \"bibandwidth\",", "number. Source system.virtual-wan-link.members.seq-num. type: int protocol: description: - Protocol number.", "dscp_forward_tag: \"<your_own_value>\" dscp_reverse: \"enable\" dscp_reverse_tag: \"<your_own_value>\" dst: - name: \"default_name_61", "sla: - health_check: \"<your_own_value> (source system.virtual-wan-link.health-check.name)\" id: \"102\" src: -", "always type: str sample: \"FGVMEVYYQT3AB5352\" status: description: Indication of the", "attribute in json and json[attribute] is not None: dictionary[attribute] =", "firewall.addrgrp.name. required: true type: str dst_negate: description: - Enable/disable negation", "\"disable\" volume_ratio: \"50\" weight: \"51\" service: - addr_mode: \"ipv4\" bandwidth_weight:", "number. type: int gateway: description: - Enable/disable SD-WAN service gateway.", "service ID list. type: list suboptions: id: description: - Internet", "internet_service_custom_group: description: - Custom Internet Service group list. type: list", "\"type\": \"int\"}, \"update_cascade_interface\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\", \"disable\"]},", "{\"required\": False, \"type\": \"str\"}, \"ingress_spillover_threshold\": {\"required\": False, \"type\": \"int\"}, \"interface\":", "str sample: '1547' http_method: description: Last method used to provision", "underscore_to_hyphen(filter_system_virtual_wan_link_data(system_virtual_wan_link_data)) return fos.set('system', 'virtual-wan-link', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status']", "description: - Address6 or address6 group name. 
Source firewall.address6.name firewall.addrgrp6.name.", "- authentication server: description: - IP address or FQDN name", "- disable dst6: description: - Destination address6 name. type: list", "- Quality grade. type: int route_tag: description: - IPv4 route", "over the selected protocol. type: int protocol: description: - Protocol", "Internet service provider that this interface is connected to. type:", "- The default gateway for this interface. Usually the default", "int interface: description: - Interface name. Source system.interface.name. type: str", "interfaces in the SD-WAN. type: str choices: - auto -", "health-check packet to the server. type: str spillover_threshold: description: -", "to the server. type: str spillover_threshold: description: - Egress spillover", "\"webfilter\" revision: description: Internal revision number returned: always type: str", "the health-check packet to the server. type: str spillover_threshold: description:", "IP address used in the health-check packet to the server.", "str choices: - enable - disable load_balance_mode: description: - Algorithm", "import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. #", "gateway for this interface. Usually the default gateway of the", "{\"required\": False, \"type\": \"int\"}, \"http_agent\": {\"required\": False, \"type\": \"str\"}, \"http_get\":", "\"str\"} }}, \"internet_service_custom\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\":", "255) More traffic is directed to interfaces with higher weights.", "a proper CA. type: bool default: true version_added: 2.9 system_virtual_wan_link:", "Usually the default gateway of the Internet service provider that", "sample: \"root\" version: description: Version of the FortiGate returned: always", "ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos):", "fortigate image returned: always type: str sample: '1547' http_method: description:", "(0 - 4294967295). Used for SD-WAN rules or priority rules.", "if the protocol is HTTP. type: str interval: description: -", "sample: \"v5.6.3\" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import", "- URL used to communicate with the server if the", "- 4000). required: true type: int input_device: description: - Source", "\"disable\"]}, \"groups\": {\"required\": False, \"type\": \"list\", \"options\": { \"name\": {\"required\":", "version_added: \"2.8\" author: - <NAME> (@mamunozgonzalez) - <NAME> (@thomnico) notes:", "suboptions: id: description: - Control-based Internet Service ID. required: true", "Protocol number. type: int quality_link: description: - Quality grade. type:", "\"options\": { \"name\": {\"required\": True, \"type\": \"str\"} }}, \"internet_service\": {\"required\":", "None and \\ 'password' in module.params and module.params['password'] is not", "type: str status: description: - Enable/disable SD-WAN. type: str choices:", "when switching from the back-up member to the primary member", "if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed,", "ID. Source firewall.internet-service.id. required: true type: int jitter_weight: description: -", "int security_mode: description: - Twamp controller security mode. 
type: str", "interval: \"12\" members: - seq_num: \"14 (source system.virtual-wan-link.members.seq-num)\" name: \"default_name_15\"", "\"<your_own_value>\" spillover_threshold: \"48\" status: \"disable\" volume_ratio: \"50\" weight: \"51\" service:", "- Destination address name. type: list suboptions: name: description: -", "the FortiGate can communicate with it. type: list suboptions: addr_mode:", "latency_threshold: \"26\" link_cost_factor: \"latency\" packetloss_threshold: \"28\" threshold_alert_jitter: \"29\" threshold_alert_latency: \"30\"", "from the back-up member to the primary member (0 -", "from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data,", "system.virtual-wan-link.members.seq-num. type: int protocol: description: - Protocol number. type: int", "SD-WAN Internet connection status checking (failure detection). type: str choices:", "with the server if the protocol if the protocol is", "volume_ratio: description: - Measured volume ratio (this value / sum", "False, \"type\": \"int\"}, \"latency_weight\": {\"required\": False, \"type\": \"int\"}, \"link_cost_factor\": {\"required\":", "true version_added: 2.9 system_virtual_wan_link: description: - Configure redundant internet connections", "SD-WAN. type: list suboptions: addr_mode: description: - Address mode (IPv4", "type: str sla: description: - Service level agreement (SLA). type:", "http - twamp - ping6 recoverytime: description: - Number of", "ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import", "fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else:", "operation applied returned: always type: str sample: \"200\" mkey: description:", "static route. type: str choices: - enable - disable load_balance_mode:", "\"type\": \"str\"} }}, \"src_negate\": {\"required\": False, \"type\": \"str\", \"choices\": [\"enable\",", "protocol: \"ping\" recoverytime: \"20\" security_mode: \"none\" server: \"192.168.100.40\" sla: -", "\"name\": {\"required\": True, \"type\": \"str\"} }}, \"start_port\": {\"required\": False, \"type\":", "int jitter_threshold: description: - Jitter for SLA to make decision", "type: int mode: description: - Control how the priority rule", "the time between attempting to connect to the server (1", "type: list suboptions: name: description: - Custom Internet service name.", "str choices: - latency - jitter - packet-loss - inbandwidth", "\"choices\": [\"source-ip-based\", \"weight-based\", \"usage-based\", \"source-dest-ip-based\", \"measured-volume-based\"]}, \"members\": {\"required\": False, \"type\":", "is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg=\"Error in repo\", meta=result) if __name__", "list suboptions: addr_mode: description: - Address mode (IPv4 or IPv6).", "mode (IPv4 or IPv6). type: str choices: - ipv4 -", "type: str default: \"\" vdom: description: - Virtual domain, among", "by FortiGate on last operation applied returned: always type: str", "\"type\": \"str\"}, \"id\": {\"required\": False, \"type\": \"int\"} }}, \"src\": {\"required\":", "str default: root https: description: - Indicates if the requests", "custom-profile-1. type: int latency_weight: description: - Coefficient of latency in", "provider that this interface is connected to. 
type: str gateway6:", "fortios_system_virtual_wan_link short_description: Configure redundant internet connections using SD-WAN (formerly virtual", "is connected to. type: str gateway6: description: - IPv6 gateway.", "in the HTTP header. type: str http_get: description: - URL", "Source firewall.address.name firewall.addrgrp.name. required: true type: str src_negate: description: -", "\"mode\": {\"required\": False, \"type\": \"str\", \"choices\": [\"auto\", \"manual\", \"priority\", \"sla\"]},", "- Custom Internet service name. Source firewall.internet-service-custom.name. required: true type:", "disable - enable volume_ratio: description: - Measured volume ratio (this", "Custom Internet service name. Source firewall.internet-service-custom.name. required: true type: str", "traffic to SD-WAN members. type: str choices: - source-ip-based -", "group name. Source firewall.address.name firewall.addrgrp.name. required: true type: str dst_negate:", "'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host,", "authentication server: description: - IP address or FQDN name of", "10000000). type: int member: description: - Member sequence number. type:", "security_mode: \"none\" server: \"192.168.100.40\" sla: - id: \"24\" jitter_threshold: \"25\"", "Enable/disable negation of source address match. type: str choices: -", "\"31\" threshold_warning_jitter: \"32\" threshold_warning_latency: \"33\" threshold_warning_packetloss: \"34\" update_cascade_interface: \"enable\" update_static_route:", "build: description: Build number of the fortigate image returned: always", "type: str gateway6: description: - IPv6 gateway. type: str ingress_spillover_threshold:", "(source application.group.name)\" internet_service_custom: - name: \"default_name_80 (source firewall.internet-service-custom.name)\" internet_service_custom_group: -", "- latency - jitter - packet-loss - inbandwidth - outbandwidth", "{\"required\": True, \"type\": \"int\"}, \"input_device\": {\"required\": False, \"type\": \"list\", \"options\":", "- FortiOS or FortiGate username. type: str required: false password:", "for packet loss (percentage). type: int threshold_warning_jitter: description: - Warning", "system.virtual-wan-link.health-check.name. type: str id: description: - SLA ID. type: int", "{ \"seq_num\": {\"required\": False, \"type\": \"int\"} }}, \"protocol\": {\"required\": False,", "sample: \"id\" name: description: Name of the table used to", "list suboptions: id: description: - Internet service ID. Source firewall.internet-service.id.", "\"options\": { \"name\": {\"required\": True, \"type\": \"str\"} }}, \"src_negate\": {\"required\":", "controller password in authentication mode type: str port: description: -", "\"status\": {\"required\": False, \"type\": \"str\", \"choices\": [\"disable\", \"enable\"]}, \"volume_ratio\": {\"required\":", "dst: - name: \"default_name_61 (source firewall.address.name firewall.addrgrp.name)\" dst_negate: \"enable\" dst6:", "description: - Algorithm or mode to use for load balancing", "balancing Internet traffic to SD-WAN members. type: str choices: -", "required: true type: str internet_service: description: - Enable/disable use of", "(SLA). type: list suboptions: health_check: description: - Virtual WAN Link", "server if the protocol if the protocol is HTTP. type:", "interface. 
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

__metaclass__ = type

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

DOCUMENTATION = '''
---
module: fortios_system_virtual_wan_link
short_description: Configure redundant internet connections using SD-WAN (formerly virtual WAN link) in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify system feature and virtual_wan_link category.
      Examples include all parameters and values need to be adjusted to datasources before usage.
      Tested with FOS v6.0.5
version_added: "2.8"
author:
    - <NAME> (@mamunozgonzalez)
    - <NAME> (@thomnico)
notes:
    - Requires fortiosapi library developed by Fortinet
    - Run as a local_action in your playbook
requirements:
    - fortiosapi>=0.9.8
options:
    host:
        description:
            - FortiOS or FortiGate IP address.
        type: str
        required: false
    username:
        description:
            - FortiOS or FortiGate username.
        type: str
        required: false
    password:
        description:
            - FortiOS or FortiGate password.
        type: str
        default: ""
    vdom:
        description:
            - Virtual domain, among those defined previously. A vdom is a
              virtual instance of the FortiGate that can be configured and
              used as a different unit.
        type: str
        default: root
    https:
        description:
            - Indicates if the requests towards FortiGate must use HTTPS protocol.
        type: bool
        default: true
    ssl_verify:
        description:
            - Ensures FortiGate certificate must be verified by a proper CA.
        type: bool
        default: true
    system_virtual_wan_link:
        description:
            - Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
        default: null
        type: dict
        suboptions:
            fail_alert_interfaces:
                description:
                    - Physical interfaces that will be alerted.
                type: list
            fail_detect:
                description:
                    - Enable/disable SD-WAN Internet connection status checking (failure detection).
                type: str
                choices:
                    - enable
                    - disable
            health_check:
                description:
                    - SD-WAN status checking or health checking. Identify a server on the Internet
                      and determine how SD-WAN verifies that the FortiGate can communicate with it.
                type: list
            load_balance_mode:
                description:
                    - Algorithm or mode to use for load balancing Internet traffic to SD-WAN members.
                type: str
                choices:
                    - source-ip-based
                    - weight-based
                    - usage-based
                    - source-dest-ip-based
                    - measured-volume-based
            members:
                description:
                    - Physical FortiGate interfaces added to the virtual-wan-link.
                type: list
            service:
                description:
                    - Create SD-WAN rules or priority rules (also called services) to control how
                      sessions are distributed to physical interfaces in the SD-WAN.
                type: list
            status:
                description:
                    - Enable/disable SD-WAN.
                type: str
                choices:
                    - disable
                    - enable
'''

EXAMPLES = '''
- hosts: localhost
  vars:
   host: "192.168.122.40"
   username: "admin"
   password: ""
   vdom: "root"
   ssl_verify: "False"
  tasks:
  - name: Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
    fortios_system_virtual_wan_link:
      host:  "{{ host }}"
      username: "{{ username }}"
      password: "{{ password }}"
      vdom:  "{{ vdom }}"
      https: "False"
      system_virtual_wan_link:
        fail_detect: "enable"
        health_check:
         -
            addr_mode: "ipv4"
            failtime: "8"
            http_agent: "<your_own_value>"
            http_get: "<your_own_value>"
            http_match: "<your_own_value>"
            interval: "12"
            protocol: "ping"
            security_mode: "none"
            server: "192.168.100.40"
            sla:
             -
                id: "24"
                jitter_threshold: "25"
                latency_threshold: "26"
                link_cost_factor: "latency"
                packetloss_threshold: "28"
            threshold_alert_jitter: "29"
            threshold_alert_latency: "30"
            threshold_alert_packetloss: "31"
            threshold_warning_jitter: "32"
            threshold_warning_latency: "33"
            threshold_warning_packetloss: "34"
            update_cascade_interface: "enable"
            update_static_route: "enable"
        load_balance_mode: "source-ip-based"
        members:
         -
            comment: "Comments."
            gateway: "<your_own_value>"
            gateway6: "<your_own_value>"
            source6: "<your_own_value>"
            spillover_threshold: "48"
            status: "disable"
            volume_ratio: "50"
            weight: "51"
        status: "disable"
'''

RETURN = '''
build:
  description: Build number of the fortigate image
  returned: always
  type: str
  sample: '1547'
http_method:
  description: Last method used to provision the content into FortiGate
  returned: always
  type: str
  sample: 'PUT'
http_status:
  description: Last result given by FortiGate on last operation applied
  returned: always
  type: str
  sample: "200"
mkey:
  description: Master key (id) used in the last call to FortiGate
  returned: success
  type: str
  sample: "id"
name:
  description: Name of the table used to fulfill the request
  returned: always
  type: str
  sample: "urlfilter"
path:
  description: Path of the table used to fulfill the request
  returned: always
  type: str
  sample: "webfilter"
revision:
  description: Internal revision number
  returned: always
  type: str
  sample: "17.0.2.10658"
serial:
  description: Serial number of the unit
  returned: always
  type: str
  sample: "FGVMEVYYQT3AB5352"
status:
  description: Indication of the operation's result
  returned: always
  type: str
  sample: "success"
vdom:
  description: Virtual domain used
  returned: always
  type: str
  sample: "root"
version:
  description: Version of the FortiGate
  returned: always
  type: str
  sample: "v5.6.3"
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG


def login(data, fos):
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)


def filter_system_virtual_wan_link_data(json):
    option_list = ['fail_alert_interfaces', 'fail_detect', 'health_check',
                   'load_balance_mode', 'members', 'service', 'status']
    dictionary = {}

    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]

    return dictionary


def underscore_to_hyphen(data):
    if isinstance(data, list):
        for elem in data:
            elem = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data


def system_virtual_wan_link(data, fos):
    vdom = data['vdom']
    system_virtual_wan_link_data = data['system_virtual_wan_link']
    filtered_data = underscore_to_hyphen(filter_system_virtual_wan_link_data(system_virtual_wan_link_data))

    return fos.set('system',
                   'virtual-wan-link',
                   data=filtered_data,
                   vdom=vdom)


def is_successful_status(status):
    return status['status'] == "success" or \
        status['http_method'] == "DELETE" and status['http_status'] == 404


def fortios_system(data, fos):
    if data['system_virtual_wan_link']:
        resp = system_virtual_wan_link(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp


def main():
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "system_virtual_wan_link": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "fail_alert_interfaces": {"required": False, "type": "list",
                                          "options": {
                                              "name": {"required": True, "type": "str"}
                                          }},
                "fail_detect": {"required": False, "type": "str",
                                "choices": ["enable", "disable"]},
                "health_check": {"required": False, "type": "list"},
                "load_balance_mode": {"required": False, "type": "str",
                                      "choices": ["source-ip-based", "weight-based", "usage-based",
                                                  "source-dest-ip-based", "measured-volume-based"]},
                "members": {"required": False, "type": "list"},
                "service": {"required": False, "type": "list"},
                "status": {"required": False, "type": "str",
                           "choices": ["disable", "enable"]}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
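Seen end to end, the module's real work is small: drop unset options, convert the playbook's underscore-style keys into the hyphenated keys the FortiOS API expects, and push the result to the system/virtual-wan-link endpoint. The standalone sketch below shows only that key-shaping step; the helper name to_fortios_keys and the values in task_params are illustrative and not part of the module itself.

# Standalone sketch of the payload shaping performed by
# filter_system_virtual_wan_link_data() + underscore_to_hyphen() above.
def to_fortios_keys(data):
    # Recursively replace underscores with hyphens in dictionary keys.
    if isinstance(data, list):
        return [to_fortios_keys(elem) for elem in data]
    if isinstance(data, dict):
        return {k.replace('_', '-'): to_fortios_keys(v) for k, v in data.items()}
    return data


# Illustrative task parameters (hypothetical values).
task_params = {
    "load_balance_mode": "source-ip-based",
    "fail_detect": "enable",
    "members": [{"seq_num": 1, "spillover_threshold": 0}],
    "service": None,  # unset options are dropped before the API call
}

payload = to_fortios_keys({k: v for k, v in task_params.items() if v is not None})
print(payload)
# {'load-balance-mode': 'source-ip-based', 'fail-detect': 'enable',
#  'members': [{'seq-num': 1, 'spillover-threshold': 0}]}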
<filename>src/Puerta.py
from ElementoMapa import ElementoMapa


class Puerta(ElementoMapa):
    # A door connecting two rooms (lado1/lado2); it starts out open (abierta).
    def __init__(self):
        self.abierta = True
        self.lado2 = None
        self.lado1 = None

    def get_abierta(self):
        return self.abierta

    def print_cosas(self):
        print("hola")

    def set_abierta(self, value):
        self.abierta = value

    def get_lado1(self):
        return self.lado1

    def set_lado1(self, value):
        self.lado1 = value

    def get_lado2(self):
        return self.lado2

    def set_lado2(self, value):
        self.lado2 = value

    def espuerta(self):
        return True

    def abrir(self):
        self.abierta = True

    def entrar(self, habitacion):
        # Walk through the door into the given room, but only if the door is open
        # and actually connects to that room.
        if self.abierta and (self.lado1.id == habitacion.id or self.lado2.id == habitacion.id):
            print("Ahora estas en la habitacion", habitacion.id)  # "You are now in room <id>"
            if habitacion.hijos[0] is None:
                pass
            else:
                if habitacion.hijos[0].activa:
                    print("La bomba ha estallado")  # "The bomb has exploded"
        if not self.abierta:
            print("La puerta esta cerrada")  # "The door is closed"
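A hypothetical usage sketch follows; Habitacion here is only a minimal stand-in for the project's real room class (Puerta.entrar only needs .id and .hijos), so the class body and the room ids are illustrative.

# Minimal stand-in for the real Habitacion class (only .id and .hijos are used here).
class Habitacion:
    def __init__(self, id):
        self.id = id
        self.hijos = [None]  # no bomb placed in this room


if __name__ == "__main__":
    sala = Habitacion(1)
    pasillo = Habitacion(2)

    puerta = Puerta()
    puerta.set_lado1(sala)
    puerta.set_lado2(pasillo)

    puerta.entrar(sala)        # -> "Ahora estas en la habitacion 1"
    puerta.set_abierta(False)
    puerta.entrar(sala)        # -> "La puerta esta cerrada"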
"""
# Step 1 - Create the App
# Step 2 - Create the Game
# Step 3 - Build the Game
# Step 4 - Run the App
"""
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint


class PongPaddle(Widget):
    score = NumericProperty(0)

    def bounce_ball(self, ball):
        if self.collide_widget(ball):
            ball.velocity_x *= -1
            print('hello world')


class PongBall(Widget):
    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    velocity = ReferenceListProperty(velocity_x, velocity_y)

    # Latest Position of the Ball = Current Velocity + Current Position
    def move(self):
        self.pos = Vector(*self.velocity) + self.pos


# Update - moving the ball by calling the move function and other stuff
# on_touch_down() - when our fingers/mouse touches the screen
# on_touch_up() - when we lift our finger off the screen after touching it
# on_touch_move() - when we drag our finger on the screen
class PongGame(Widget):
    ball = ObjectProperty(None)
    player1 = ObjectProperty(None)
    player2 = ObjectProperty(None)

    def serve_ball(self):
        self.ball.velocity = Vector(4, 0).rotate(randint(0, 360))

    def update(self, dt):
        self.ball.move()

        # Bounce off top and bottom Y
        if (self.ball.y < 0) or (self.ball.y > self.height - 50):
            self.ball.velocity_y *= -1.1

        # Bounce off left and increase the score
        if self.ball.x < 0:
            self.ball.velocity_x *= -1
            self.player1.score += 1

        # Bounce off right and increase the score
        if self.ball.x > self.width - 50:
            self.ball.velocity_x *= -1
            self.player2.score += 1

        self.player1.bounce_ball(self.ball)
        self.player2.bounce_ball(self.ball)

    def on_touch_move(self, touch):
        if touch.x < self.width / 1 / 4:
            self.player1.center_y = touch.y
        if touch.x > self.width * 3 / 4:
            self.player2.center_y = touch.y


class PongApp(App):
    def build(self):
        game = PongGame()
        game.serve_ball()
        Clock.schedule_interval(game.update, 1.0 / 60.0)
        return game


PongApp().run()
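Two things are worth noting when running this: PongApp presumably relies on a pong.kv layout (not included in this dump) to wire the ball, player1 and player2 ObjectPropertys to actual widgets, and the motion itself is plain vector arithmetic. The snippet below isolates that arithmetic with kivy's Vector so it can be checked without opening a window; the starting position and loop count are illustrative.

# Standalone check of the serve/move/bounce arithmetic used in PongBall/PongGame.
from random import randint

from kivy.vector import Vector

velocity = Vector(4, 0).rotate(randint(0, 360))   # serve_ball: speed 4 in a random direction
pos = Vector(100, 100)                            # illustrative starting position

for _ in range(3):
    pos = velocity + pos          # PongBall.move(): new position = velocity + old position
    if pos.y < 0:                 # PongGame.update(): bounce off the bottom edge
        velocity.y *= -1.1        # and speed the ball up slightly
    print(tuple(pos))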
class SceneRelation:
    def __init__(self):
        self.on_ground = set()
        self.on_block = {}
        self.clear = set()

    def print_relation(self):
        print(self.on_ground)
        print(self.on_block)
        print(self.clear)
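A hypothetical usage sketch, with block names chosen only for illustration: on_ground would hold blocks resting on the table, on_block would map a block to the block it sits on, and clear would hold blocks with nothing on top.

# Illustrative three-block stack: C on B, B on A, A on the ground (assumed semantics).
relation = SceneRelation()
relation.on_ground = {"A"}
relation.on_block = {"B": "A", "C": "B"}   # block -> block it rests on
relation.clear = {"C"}                     # nothing is stacked on C
relation.print_relation()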
"""
<NAME>
"""
import copy
import datetime
import random
from collections import namedtuple

import numpy as np

NUM_ACTIONS = 52  # Agent can choose any card to play (only some are valid).
NUM_GAMES_TEST = 10000
STATS_PER = 1000

"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Representing cards:
Cards 0  -> 12 are Club 2    -> Club 14
Cards 13 -> 25 are Diamond 2 -> Diamond 14
Cards 26 -> 38 are Heart 2   -> Heart 14
Cards 39 -> 51 are Spade 2   -> Spade 14
Jack is 11, Queen is 12, King is 13, Ace is 14

Representing suits:
n/a is -1 <-- used in a "State" where no cards have been played yet.
Clubs is 0
Diamonds is 1
Hearts is 2
Spades is 3

Representing a state: the opening suit, the cards played so far in the
round (up to 3 "Card"-s), and the card played by the agent's partner.

state = State(1, frozenset(23, 0))
Diamond 12, Club 2 <-- first 2 cards in round
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
State = namedtuple('State', ['opening_suit', 'cards_played', 'partners_card'])


"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" RL AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgent:
    def __init__(self):
        # Map each state to an array of length-52: We call this Map "weights".
        # Initialize all weights to 1 such that every card has an equal chance of being chosen.
        self.weights = {}
        self.weights[State(-1, frozenset(), -1)] = np.full(NUM_ACTIONS, 1.0)
        for opening_suit in range(4):
            for card_1 in range(52):
                for card_2 in range(card_1, 52):
                    for card_3 in range(card_2, 52):
                        for card_partner in [-1, card_1, card_2, card_3]:
                            state = State(opening_suit, frozenset({card_1, card_2, card_3}), card_partner)
                            self.weights[state] = np.full(NUM_ACTIONS, 1.0)

        # Learning-rate decay, chosen per number of training games:
        # self.alpha = 0.997      # 1,000
        # self.alpha = 0.9995     # 10,000
        # self.alpha = 0.99995    # 100,000
        self.alpha = 0.999995     # 1,000,000
        # self.alpha = 0.9999995  # 5,000,000
        self.game_num = 1

    """
    EXAMPLE
    state = State(1, set(23, 0))
    card_played = 24
    If 4th card is 25 the agent loses the trick; otherwise the agent wins.
    We want to increase the proportion with which we play 24.
    ba.add_win(state, card_played)
    """
    def add_win(self, state, card_played):
        self.weights[state][card_played] *= (1 + 0.1 * self.alpha ** self.game_num)

    """
    EXAMPLE
    state = State(1, frozenset(23, 0))
    Diamond 12, Club 2 <-- first 2 cards in round
    cards_in_hand = set(0, 1, 4, 8, 11, 20, 24, 38)
    The agent chooses to play whichever remaining card has the highest weight.
    The agent must play a Diamond if it has Diamonds. In this example the agent will
    most likely play 24: weights[state] will likely have very large values at indices
    24 and 25 since a Diamond 13 and Diamond 14 will beat the Diamond 12.
    card_played = ba.play_card(state, cards_in_hand)
    """
    def play_card(self, state, cards_in_hand):
        suit = state.opening_suit
        # valid_cards = [20, 24]
        valid_cards = np.array([i for i in range(suit * 13, (suit + 1) * 13) if i in cards_in_hand])
        if len(valid_cards) == 0:
            valid_cards = cards_in_hand
        # Choose the valid card with the highest weight.
        index_into_valid_cards = np.random.choice(
            np.flatnonzero(self.weights[state][valid_cards] == self.weights[state][valid_cards].max()))
        # returns valid_cards[1] = 24
        return valid_cards[index_into_valid_cards]


"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" TRACKS PERFORMANCE OF BRIDGE AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgentRedFlags:
    def __init__(self):
        self.RED_FLAG_VIOLATIONS = np.zeros(3)
        self.RED_FLAG_TOTAL_COUNT = np.zeros(3)
        self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3)
        self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3)

    def clear_red_flags(self):
        self.RED_FLAG_VIOLATIONS = np.zeros(3)
        self.RED_FLAG_TOTAL_COUNT = np.zeros(3)

    """
    This function checks if the agent plays their highest card even though it cannot win the round.
    """
    def highest_card(self, valid_cards, agent_valid_cards, card):
        if len(agent_valid_cards) > 1 and max(valid_cards) > max(agent_valid_cards):
            self.RED_FLAG_TOTAL_COUNT[0] += 1
            self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1
            if card == max(agent_valid_cards):
                self.RED_FLAG_VIOLATIONS[0] += 1
                self.ALL_RED_FLAG_VIOLATIONS[0] += 1


"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" UTILITY FUNCTIONS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function is used to generate random games; it deals random cards.
"""
deck = list(range(52))


def shuffle_cards():
    random.shuffle(deck)
    return [deck[0:13], deck[13:26], deck[26:39], deck[39:52]]


"""
This function is used by non-agents who play randomly.
"""
def play_random_card(opening_suit, cards_in_hand):
    valid_cards = [i for i in range(opening_suit * 13, (opening_suit + 1) * 13) if i in cards_in_hand]
    if len(valid_cards) == 0:
        return random.choice(cards_in_hand)
    return random.choice(valid_cards)


"""
This function counts the points in each hand.
Note: Ace is worth 4 points, King 3, Queen 2 and Jack 1.
"""
def count_points(hands):
    points = []
    for hand in hands:
        p = 0
        for card in hand:
            if card % 13 == 12:
                p += 4
            elif card % 13 == 11:
                p += 3
            elif card % 13 == 10:
                p += 2
            elif card % 13 == 9:
                p += 1
        points.append(p)
    return points


"""
This function determines the declarer based on partnership with the most points.
Return: (agent_is_declarer, declarer_idx)
"""
def determine_declarer(points):
    if points[0] + points[2] > points[1] + points[3] and points[2] > points[0]:
        return True, 2
    # Otherwise the agent is not the declarer and the opponents start the play
    return False, -1


"""
This function determines which player won the round.
"""
def determine_round_winner(suit, cards_played):
    max_idx = -1
    max_val = -1
    for idx, card in enumerate(cards_played):
        if suit * 13 <= card < (suit + 1) * 13 and card > max_val:
            max_val = card
            max_idx = idx
    return max_idx
We want to incrase the proportion with", "a card in order starting from round_winner for player in", "+= 1 points.append(p) return points \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" TRACKS PERFORMANCE", "[i for i in range(suit * 13, (suit + 1)", "all weights to 1 such that every card has an", "agent_valid_cards = np.array([i for i in all_valid_cards if i in", "self.ALL_RED_FLAG_VIOLATIONS[0] += 1 \"\"\" This function checks if the agent", "1 self.ALL_RED_FLAG_VIOLATIONS[2] += 1 \"\"\" This function checks for any", "not in partners_cards ): self.RED_FLAG_TOTAL_COUNT[1] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[1] += 1", "card == max(agent_valid_cards): self.RED_FLAG_VIOLATIONS[0] += 1 self.ALL_RED_FLAG_VIOLATIONS[0] += 1 \"\"\"", "card, cards_played, partners_cards): if (len(cards_played) == 3 and len(agent_valid_cards) >", "10: p += 2 elif card % 13 == 9:", "valid_cards = np.array([i for i in all_valid_cards if i in", "\"agent_wins,random_wins,diff_wins,\" \"agent_rfv_a,agent_rftc_a,\" \"agent_rfv_b,agent_rftc_b,\" \"agent_rfv_c,agent_rftc_c,\" \"random_rfv_a,random_rftc_a,\" \"random_rfv_b,random_rftc_b,\" \"random_rfv_c,random_rftc_c\\n\") barf = BridgeAgentRedFlags()", "+ 0.1 * self.alpha ** self.game_num) \"\"\" EXAMPLE state =", "card has an equal chance of being chosen. self.weights =", "def higher_card(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards): if (len(cards_played) ==", "11: p += 3 elif card % 13 == 10:", "barf.clear_red_flags() barf_random.clear_red_flags() NS_Wins.append(0) NS_Wins_random.append(0) average_win_delta = (sum(NS_Wins)-sum(NS_Wins_random)) / ((len(NS_Wins) -", "\"agent_rfv_a,agent_rftc_a,\" \"agent_rfv_b,agent_rftc_b,\" \"agent_rfv_c,agent_rftc_c,\" \"random_rfv_a,random_rftc_a,\" \"random_rfv_b,random_rftc_b,\" \"random_rfv_c,random_rftc_c\\n\") barf = BridgeAgentRedFlags() barf_random", "round_winner = (determine_round_winner(opening_suit, cards_played) + round_winner) % 4 # Adjust", "points[1] + points[3] and points[2] > points[0]: return True, 2", "their highest card even though the highest card already played", "EXAMPLE state = State(1, set(23, 0)) card_played = 24 If", "cards_played = [] agent_card_played = [-1, -1] agent_state = None", "The agent is 3rd to play a card and must", "/ ((len(NS_Wins) - 1) * STATS_PER) average_rf_ratios_agent = np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT)", "f\"{average_win_delta},{average_rf_ratios_agent},{average_rf_ratios_random}\\n\") return ba def main(): start_time = datetime.datetime.now() hands =", "-> Diamond 14 Cards 26 -> 38 are Heart 2", "np.array([i for i in all_valid_cards if i in hands[player_idx]]) if", "== max(agent_valid_cards): self.RED_FLAG_VIOLATIONS[0] += 1 self.ALL_RED_FLAG_VIOLATIONS[0] += 1 \"\"\" This", "Agent can choose any card to play (only some are", "of points in each hand # agent's partnership has more", "determines the declarer based on partnership with the most points.", "= np.array([i for i in all_valid_cards if i in hands[player_idx]])", "with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\" + str(t) + \".csv\", 'a') as", "This function plays 13 rounds of 1 NT bridge and", "2 # agent is not declarer and agent should start", "0)) # Diamond 12, Club 2 <-- first 2 cards", "= -1 for idx, card in enumerate(cards_played): if suit *", "= State(1, set(23, 0)) # Diamond 12, Club 2 <--", "= None player_idx = (round_winner + player) % 4 if", "agent_state_2 = None opening_suit = -1 # Each player plays", "that every card has an 
equal chance of being chosen.", "declarer starts the game NS_Wins = 0 # used to", "<-- first 2 cards in round card_played = 24 #", "A SINGLE GAME OF BRIDGE \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\" This function", "__init__(self): self.RED_FLAG_VIOLATIONS = np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3) self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3)", "-1 max_val = -1 for idx, card in enumerate(cards_played): if", "though their partner is guaranteed to win. \"\"\" def partner_win(self,", "\"\"\" round_winner = (d + 1) % 4 # the", "function checks for any red flags based on what the", "= -1 max_val = -1 for idx, card in enumerate(cards_played):", "self.RED_FLAG_VIOLATIONS = np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3) \"\"\" This function checks", "if card == max(agent_valid_cards): self.RED_FLAG_VIOLATIONS[0] += 1 self.ALL_RED_FLAG_VIOLATIONS[0] += 1", "\" \" RL AGENT \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" class BridgeAgent: def __init__(self):", "9: p += 1 points.append(p) return points \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \"", "card // 13 hands[player_idx].remove(card) cards_played.append(card) # Get the winning card.", "+ partner has played highest card, does agent play lowest", "declarer: ba.add_win(agent_state_2, agent_card_played[1]) NS_Wins += 1 else: if ba and", "+ points[2] > points[1] + points[3] and points[2] > points[0]:", "i in cards_played]) agent_valid_cards = np.array([i for i in all_valid_cards", "False, iterations=NUM_GAMES_TEST) end_time = datetime.datetime.now() print(\"Runtime: \", end_time - start_time)", "since 20 has a smaller weight than 24. # index_into_valid_cards", "cards have been played yet. Clubs is 0 Diamonds is", "max(valid_cards) in partners_cards ): self.RED_FLAG_TOTAL_COUNT[2] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[2] += 1", "= shuffle_cards() NS_Wins[-1] += play_game(game=game, hands=copy.deepcopy(hands), train=True, ba=ba, barf=barf) NS_Wins_random[-1]", "BridgeAgent or N wins. if round_winner == 0 or round_winner", "np.argmax(y) policy.append(max) count += 1 game_file = \"Bridge_\" + str(game", "round_winner) % 4 # Adjust the BridgeAgent weights. # If", "\"Card\"-s: state = State(1, frozenset(23, 0)) We have a Diamond", "__init__(self): # We initialize all weights to 1 such that", "= State(1, set(23, 0)) cards_in_hand = set(0, 1, 4, 8,", "policy = [] count = 0 for x in ba.weights:", "# self.alpha = 0.9995 # 10,000 # self.alpha = 0.99995", "partner_win(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards): if (len(cards_played) == 3", "and len(agent_valid_cards) > 1 and max(valid_cards) in partners_cards ): self.RED_FLAG_TOTAL_COUNT[2]", "else: # Random bot plays card = play_random_card(opening_suit, hands[player_idx]) #", "it play highest card or highest necessary card? self.higher_card(valid_cards, agent_valid_cards,", "function plays 13 rounds of 1 NT bridge and outputs", "hands[player_idx].remove(card) cards_played.append(card) # Get the winning card. round_winner = (determine_round_winner(opening_suit,", "Return: (agent_is_declarer, declarer_idx) \"\"\" def agent_declarer(hands): points = count_points(hands) #", "if it has one. 
Representing the MDP with a Map", "= None agent_state_2 = None opening_suit = -1 # Each", "= card, idx return max_idx \"\"\" This function determines the", "card_played = 24 # Diamond 13 <-- 3rd card in", "24, 38) The agent choose to play whichever remaining card", "ba.play_card(state, cards_in_hand) \"\"\" def play_card(self, state, cards_in_hand): # Following the", "# index_into_valid_counts = 1 since 20 has a smaller weight", "# TRAINING print(f\"TRAINING on {NUM_GAMES_TRAIN} games\") ba = BridgeAgent() ba", "if len(valid_cards) == 0: return random.choice(cards_in_hand) return random.choice(valid_cards) \"\"\" This", "1 NT bridge and outputs a winner. \"\"\" def play_game(game,", "iterations=NUM_GAMES_TRAIN): with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\" + str(t) + \".csv\", 'w')", "valid_cards, agent_valid_cards, card): if len(agent_valid_cards) > 1 and max(valid_cards) >", "and points[2] > points[0]: return True, 2 # agent is", "set(23, 0)), weights[state] will likely have very large values at", "\"_Game_Data_Train-\" + str(t) + \".csv\", 'a') as k: k.write( f\"{game", "= State(1, set(23, 0)) card_played = 24 If 4th card", "24 return valid_cards[index_into_valid_cards] \"\"\" This function write the policy at", "This function write the policy at the end of the", "self.RED_FLAG_TOTAL_COUNT = np.zeros(3) self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3) # Cumulative self.ALL_RED_FLAG_TOTAL_COUNT =", "have a Diamond 12 and Club 2 with an opening", "State(1, set(23, 0)) cards_in_hand = set(0, 1, 4, 8, 11,", "start_time = datetime.datetime.now() hands = [] # TRAINING print(f\"TRAINING on", "player_idx, partners_cards): all_valid_cards = list(range(suit * 13, (suit + 1)", "card already played is higher than theirs. \"\"\" def highest_card(self,", "cards_in_hand # Choose the valid card with highest weight. #", "checks for any red flags based on what the agent", "(suit + 1) * 13 and card > max_val: max_val,", "and frozenset of up to 3 \"Card\"-s: state = State(1,", "- 1) * STATS_PER) average_rf_ratios_agent = np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT) average_rf_ratios_random =", "DEFINE SOME CONSTANTS \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" NUM_ACTIONS = 52 # Agent", "play 24. ba.add_win(state, card_played) \"\"\" def add_win(self, state, card_played): self.weights[state][card_played]", "12, Club 2 <-- first 2 cards in round card_played", "already and the agent has at least one higher card", "or highest necessary card? self.higher_card(valid_cards, agent_valid_cards, card, cards_played, partners_cards) #", "game NS_Wins = 0 # used to count total wins", "the highest weight. The agent must play a Diamond if", "equal chance of being chosen. self.weights = {} self.weights[State(-1, frozenset(),", "# self.alpha = 0.9999995 # 5,000,000 self.game_num = 1 \"\"\"", "card % 13 == 11: p += 3 elif card", "at indices 24 and 25 since a Diamond 13 and", "\" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" class BridgeAgentRedFlags: def __init__(self): self.RED_FLAG_VIOLATIONS = np.zeros(3) self.RED_FLAG_TOTAL_COUNT", "state.cards_played, state.partners_card, policy[count])) if state in states_accessed: g.write(\"State: suit {}", "N wins. 
if round_winner == 0 or round_winner == 2:", "\"\"\" EXAMPLE state = State(1, set(23, 0)) card_played = 24", "partner's card {}\\nBest Card To Play: {}\\n\\n\".format(state.opening_suit, state.cards_played, state.partners_card, policy[count]))", "14 Jack is 11 Queen is 12 King is 13", "hands, card, suit, cards_played, player_idx, partners_cards): all_valid_cards = list(range(suit *", "52): for card_partner in [-1, card_1, card_2, card_3]: state =", "2 cards in round card_played = 24 # Diamond 13", "card has the highest weight. The agent must play a", "points = [] for hand in hands: p = 0", "of the opening suit. if player == 0: opening_suit =", "< (suit + 1) * 13 and card > max_val:", "declarer, d = agent_declarer(hands) \"\"\" hands[0] = North's cards hands[1]", "and max(valid_cards) not in partners_cards ): self.RED_FLAG_TOTAL_COUNT[1] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[1]", "partners_cards) # 3 cards played + partner has played highest", "| cards played {} | partner's card {}\\nBest Card To", "np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3) self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3) # Cumulative self.ALL_RED_FLAG_TOTAL_COUNT", "agent choose to play whichever remaining card has the highest", "weights here print(f\"TESTING on {NUM_GAMES_TEST} games\") game_summary(ba, False, iterations=NUM_GAMES_TEST) end_time", "function write the policy at the end of the data", "play whichever remaining card has the highest weight. The agent", "then the agent wins. We want to incrase the proportion", "(determine_round_winner(opening_suit, cards_played) + round_winner) % 4 # Adjust the BridgeAgent", "cards_in_hand]) if len(valid_cards) == 0: valid_cards = cards_in_hand # Choose", "the data training phase. \"\"\" def write_policy(self, cards_in_hand, policy, filename,", "_ in range(13): cards_played = [] agent_card_played = [-1, -1]", "TESTING -- we don't change the weights here print(f\"TESTING on", "Queen is 12 King is 13 Ace is 14 Representing", "will most likely play 24, which beats 23 <-- hopefully", "each hand # agent's partnership has more points and agent", "NS_Wins_random[-1]},\" f\"{rfv[0]},{rftc[0]},\" f\"{rfv[1]},{rftc[1]},\" f\"{rfv[2]},{rftc[2]},\" f\"{rfv_random[0]},{rftc_random[0]},\" f\"{rfv_random[1]},{rftc_random[1]},\" f\"{rfv_random[2]},{rftc_random[2]},\" f\"\\n\") # Cumulative", "In this example, with state = (1, set(23, 0)), weights[state]", "player_idx == 2: # Agent plays if ba: agent_state =", "in all_valid_cards if i in cards_played]) agent_valid_cards = np.array([i for", "agent loses. We want to decrease the proportion with which", "(1 + 0.1 * self.alpha ** self.game_num) \"\"\" EXAMPLE state", "highest card even though the highest card already played is", "else: if ba and train: ba.add_loss(agent_state, agent_card_played[0]) if declarer: ba.add_loss(agent_state_2,", "policy at the end of the data training phase. \"\"\"", "\"\"\" This function determines the winner of the round. \"\"\"", "1 self.ALL_RED_FLAG_VIOLATIONS[1] += 1 \"\"\" This function checks if the", "print(f\"Average Red Flag Ratios - Random: {average_rf_ratios_random}\") with open(str(NUM_GAMES_TRAIN) +", "25, 38, 51 \"\"\" def count_points(hands): points = [] for", "BridgeAgent() ba = game_summary(ba, True) # TESTING -- we don't", "game_summary(ba, t, iterations=NUM_GAMES_TRAIN): with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\" + str(t) +", "this Map \"weights\". 
And the array of length-52 represets the", "12 and Club 2 with an opening suit of Diamonds.", "'''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" NUM_ACTIONS = 52 # Agent can choose any card", "+ 1) * 13 and card > max_val: max_val, max_idx", "to be positive): {average_win_delta}\") print(f\"Average Red Flag Ratios - Agent:", "= 1000 \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" RL AGENT \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" class", "partner is guaranteed to win. \"\"\" def partner_win(self, valid_cards, agent_valid_cards,", "range(iterations): hands = shuffle_cards() NS_Wins[-1] += play_game(game=game, hands=copy.deepcopy(hands), train=True, ba=ba,", "[0] for game in range(iterations): hands = shuffle_cards() NS_Wins[-1] +=", "hands=copy.deepcopy(hands), train=True, ba=ba, barf=barf) NS_Wins_random[-1] += play_game(game=game, hands=hands, ba=None, barf=barf_random)", "NS_Wins_random.append(0) average_win_delta = (sum(NS_Wins)-sum(NS_Wins_random)) / ((len(NS_Wins) - 1) * STATS_PER)", "if it has Diamonds. In this example, agent will most", "higher than agent's highest card self.highest_card(valid_cards, agent_valid_cards, card) # 3", "are Club 2 -> Club 14 Cards 13 -> 25", "12 King is 13 Ace is 14 Representing a \"Suit\"", "State(opening_suit, frozenset(cards_played), agent_card_played[0]) states_accessed.append(agent_state_2) card = ba.play_card(agent_state_2, hands[player_idx]) barf.assess_card_played(hands, card,", "agent_card_played[0]) states_accessed.append(agent_state_2) card = ba.play_card(agent_state_2, hands[player_idx]) barf.assess_card_played(hands, card, opening_suit, cards_played,", "determine_round_winner(suit, cards_played): max_idx = -1 max_val = -1 for idx,", "str(t) + \".csv\", 'a') as k: k.write( f\"{game + 1},\"", "1) * STATS_PER) average_rf_ratios_agent = np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT) average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS,", "EXAMPLE: # suit = 1 suit = state.opening_suit # valid_cards", "= card barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards) elif player_idx", "at the end of the data training phase. \"\"\" def", "# Cumulative def clear_red_flags(self): self.RED_FLAG_VIOLATIONS = np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3)", "self.game_num) \"\"\" EXAMPLE state = State(1, set(23, 0)) cards_in_hand =", "game, determine and write out policy if ba and game", "card, cards_played, partners_cards) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" PLAY A SINGLE GAME", "Random: {average_rf_ratios_random}\") with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Avg_Train-\" + str(t) + \".csv\",", "= 1 since 20 has a smaller weight than 24.", "an equal chance of being chosen. self.weights = {} self.weights[State(-1,", "a Map from a \"State\" to an array of length-52:", "if ba and train: ba.add_win(agent_state, agent_card_played[0]) if declarer: ba.add_win(agent_state_2, agent_card_played[1])", "card_played): self.weights[state][card_played] *= (1 + 0.1 * self.alpha ** self.game_num)", "cards. 
\"\"\" deck = list(range(52)) def shuffle_cards(): random.shuffle(deck) return [deck[0:13],", "\" \" PLAY A SINGLE GAME OF BRIDGE \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\"", "> max(valid_cards): self.RED_FLAG_VIOLATIONS[2] += 1 self.ALL_RED_FLAG_VIOLATIONS[2] += 1 \"\"\" This", "* 13, (suit + 1) * 13) if i in", "cards_played, player_idx, partners_cards) elif player_idx == 0: # if agent", "guaranteed to win. \"\"\" def partner_win(self, valid_cards, agent_valid_cards, card, cards_played,", "OF BRIDGE Representing a \"Card\" as an integer: Cards 0", "plays if ba: agent_state = State(opening_suit, frozenset(cards_played), agent_card_played[1]) states_accessed.append(agent_state) card", "game == (NUM_GAMES_TRAIN - 1): policy = [] count =", "with open(filename + \"_Last_Game.txt\", 'w') as g: g.write(\"Cards in Hand:", "State(1, set(23, 0)) # Diamond 12, Club 2 <-- first", "card_3]), card_partner) self.weights[state] = np.full(NUM_ACTIONS, 1.0) # self.alpha = 0.997", "is 25 (Diamond 14), then the agent loses. We want", "valid card with highest weight. # index_into_valid_counts = 1 since", "plays card = play_random_card(opening_suit, hands[player_idx]) # Keep track of the", "hand in hands: p = 0 for card in hand:", "12, 25, 38, 51 \"\"\" def count_points(hands): points = []", "in each hand # agent's partnership has more points and", "frozenset(cards_played), agent_card_played[0]) states_accessed.append(agent_state_2) card = ba.play_card(agent_state_2, hands[player_idx]) barf.assess_card_played(hands, card, opening_suit,", "for the last game, determine and write out policy if", "STATS_PER) average_rf_ratios_agent = np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT) average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.ALL_RED_FLAG_TOTAL_COUNT) print(f\"Average", "datetime import numpy as np import random from collections import", "Map from a \"State\" to an array of length-52: We", "# Cumulative self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3) # Cumulative def clear_red_flags(self): self.RED_FLAG_VIOLATIONS", "The agent choose to play whichever remaining card has the", "# highest card played so far is higher than agent's", "agent is not declarer and agent should start the play", "based on partnership with the most points. Return: (agent_is_declarer, declarer_idx)", "self.weights = {} self.weights[State(-1, frozenset(), -1)] = np.full(NUM_ACTIONS, 1.0) for", "we play 24. ba.add_loss(state, card_played) \"\"\" def add_loss(self, state, card_played):", "NUM_ACTIONS = 52 # Agent can choose any card to", "a Diamond 12 and Club 2 with an opening suit", "self.weights[state][valid_cards].max())) # returns valid_cards[1] = 24 return valid_cards[index_into_valid_cards] \"\"\" This", "rounds of 1 NT bridge and outputs a winner. \"\"\"", "+ round_winner) % 4 # Adjust the BridgeAgent weights. #", "-> 38 are Heart 2 -> Heart 14 Cards 39", "BridgeAgent: def __init__(self): # We initialize all weights to 1", "assess_card_played(self, hands, card, suit, cards_played, player_idx, partners_cards): all_valid_cards = list(range(suit", "3 cards played and agent has higher cards, does it", "necessary card? 
self.higher_card(valid_cards, agent_valid_cards, card, cards_played, partners_cards) # 3 cards", "+= play_game(game=game, hands=copy.deepcopy(hands), train=True, ba=ba, barf=barf) NS_Wins_random[-1] += play_game(game=game, hands=hands,", "+ \"_Game_Data_Avg_Train-\" + str(t) + \".csv\", 'w') as m: m.write(f\"avg_win_delta,avg_rf_agent,avg_rf_random\\n\"", "1) % 4 # the person to the right of", "# records which states have been updated for this game", "the array of length-52 represets the proportion with which the", "+ 1) * 13) if i in cards_in_hand]) if len(valid_cards)", "end_time = datetime.datetime.now() print(\"Runtime: \", end_time - start_time) # runtime", "opening_suit, cards_played, player_idx, partners_cards) elif player_idx == 0: # if", "1 points.append(p) return points \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" TRACKS PERFORMANCE OF", "= list(range(suit * 13, (suit + 1) * 13)) valid_cards", "self.weights[state][card_played] /= (1 + 0.1 * self.alpha ** self.game_num) \"\"\"", "> 1 and max(valid_cards) in partners_cards ): self.RED_FLAG_TOTAL_COUNT[2] += 1", "\"random_rfv_b,random_rftc_b,\" \"random_rfv_c,random_rftc_c\\n\") barf = BridgeAgentRedFlags() barf_random = BridgeAgentRedFlags() NS_Wins =", "then the agent loses. We want to decrease the proportion", "+ 1) ba.write_policy(agents_cards, policy, game_file, states_accessed) return NS_Wins def game_summary(ba,", "k: k.write(\"game,\" \"agent_wins,random_wins,diff_wins,\" \"agent_rfv_a,agent_rftc_a,\" \"agent_rfv_b,agent_rftc_b,\" \"agent_rfv_c,agent_rftc_c,\" \"random_rfv_a,random_rftc_a,\" \"random_rfv_b,random_rftc_b,\" \"random_rfv_c,random_rftc_c\\n\") barf", "f\"{game + 1},\" f\"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] - NS_Wins_random[-1]},\" f\"{rfv[0]},{rftc[0]},\" f\"{rfv[1]},{rftc[1]},\" f\"{rfv[2]},{rftc[2]},\" f\"{rfv_random[0]},{rftc_random[0]},\"", "the proportion with which we play 24. ba.add_loss(state, card_played) \"\"\"", "max_idx \"\"\" This function determines the declarer based on partnership", "enumerate(cards_played): if suit * 13 <= card < (suit +", "than 24. # index_into_valid_cards = np.argmax(self.weights[state][valid_cards]) index_into_valid_cards = np.random.choice(np.flatnonzero(self.weights[state][valid_cards] ==", "if len(agent_valid_cards) > 1 and max(valid_cards) > max(agent_valid_cards): self.RED_FLAG_TOTAL_COUNT[0] +=", "Diamonds. The agent is 3rd to play a card and", "i in range(suit * 13, (suit + 1) * 13)", "max_val: max_val, max_idx = card, idx return max_idx \"\"\" This", "ba.write_policy(agents_cards, policy, game_file, states_accessed) return NS_Wins def game_summary(ba, t, iterations=NUM_GAMES_TRAIN):", "played and agent has higher cards, does it play highest", "+= 1 else: if ba and train: ba.add_loss(agent_state, agent_card_played[0]) if", "is 12 King is 13 Ace is 14 Representing a", "13, (suit + 1) * 13) if i in cards_in_hand]", "\"\"\" def partner_win(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards): if (len(cards_played)", "card_played = ba.play_card(state, cards_in_hand) \"\"\" def play_card(self, state, cards_in_hand): #", "one higher card than what's been played. \"\"\" def higher_card(self,", "the BridgeAgent weights. 
# If the BridgeAgent or N wins.", "1 if card > max(valid_cards): self.RED_FLAG_VIOLATIONS[2] += 1 self.ALL_RED_FLAG_VIOLATIONS[2] +=", "than agent's highest card self.highest_card(valid_cards, agent_valid_cards, card) # 3 cards", "i in cards_in_hand] if len(valid_cards) == 0: return random.choice(cards_in_hand) return", "2 Spades is 3 Representing a \"State\" as an opening", "game in range(iterations): hands = shuffle_cards() NS_Wins[-1] += play_game(game=game, hands=copy.deepcopy(hands),", "the last game, determine and write out policy if ba", "all_valid_cards if i in hands[player_idx]]) if suit == -1: return", "3 Representing a \"State\" as an opening suit and frozenset", "np.zeros(3) # Cumulative def clear_red_flags(self): self.RED_FLAG_VIOLATIONS = np.zeros(3) self.RED_FLAG_TOTAL_COUNT =", "played {} | partner's card {}\\nBest Card To Play: {}\\n\\n\".format(state.opening_suit,", "+= 1 if card > max(valid_cards): self.RED_FLAG_VIOLATIONS[2] += 1 self.ALL_RED_FLAG_VIOLATIONS[2]", "UTILITY FUNCTIONS \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\" This functions deals random cards.", "\", end=\"\", flush=True) rfv = barf.RED_FLAG_VIOLATIONS rfv_random = barf_random.RED_FLAG_VIOLATIONS rftc", "== 0: print(f\"{game + 1} / \", end=\"\", flush=True) rfv", "= (d + 1) % 4 # the person to", "\"\"\" This function is used by non-agents who play randomly.", "State(1, frozenset(23, 0)) We have a Diamond 12 and Club", "= play_random_card(opening_suit, hands[player_idx]) # Keep track of the opening suit.", "Diamond if it has Diamonds. In this example, agent will", "2 -> Club 14 Cards 13 -> 25 are Diamond", "-> Club 14 Cards 13 -> 25 are Diamond 2", "while max in x.cards_played: y[max] = -1 max = np.argmax(y)", "namedtuple('State', ['opening_suit', 'cards_played', 'partners_card']) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" DEFINE SOME CONSTANTS", "= 10000 STATS_PER = 1000 \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" RL AGENT", "one. Representing the MDP with a Map from a \"State\"", "self.RED_FLAG_VIOLATIONS[1] += 1 self.ALL_RED_FLAG_VIOLATIONS[1] += 1 \"\"\" This function checks", "bridge and outputs a winner. \"\"\" def play_game(game, hands, train=False,", "= 24 # Diamond 13 <-- 3rd card in round", "state, card_played): self.weights[state][card_played] /= (1 + 0.1 * self.alpha **", "by non-agents who play randomly. \"\"\" def play_random_card(suit, cards_in_hand): if", "Clubs is 0 Diamonds is 1 Hearts is 2 Spades", "in round If 4th card is not 25, then the", "card > max_val: max_val, max_idx = card, idx return max_idx", "hands[3] = West's cards \"\"\" round_winner = (d + 1)", "Note: Ace is 12, 25, 38, 51 \"\"\" def count_points(hands):", "the valid card with highest weight. # index_into_valid_counts = 1", "hands[0] = North's cards hands[1] = East's cards hands[2] =", "# Keep track of the opening suit. if player ==", "function checks if the agent wins a round when there's", "else: card = play_random_card(opening_suit, hands[player_idx]) agent_card_played[0] = card barf.assess_card_played(hands, card,", "np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3) \"\"\" This function checks if the", "= East's cards hands[2] = Agent's cards hands[3] = West's", "such that every card has an equal chance of being", "been played yet. Clubs is 0 Diamonds is 1 Hearts", "2 -> Diamond 14 Cards 26 -> 38 are Heart", "Diamonds. 
In this example, agent will most likely play 24,", "+= 2 elif card % 13 == 9: p +=", "card_played) \"\"\" def add_loss(self, state, card_played): self.weights[state][card_played] /= (1 +", "+ 1} / \", end=\"\", flush=True) rfv = barf.RED_FLAG_VIOLATIONS rfv_random", "% 4 if player_idx == 2: # Agent plays if", "should start the play return False, -1 \"\"\" This function", "game_file, states_accessed) return NS_Wins def game_summary(ba, t, iterations=NUM_GAMES_TRAIN): with open(str(NUM_GAMES_TRAIN)", "checks if the agent plays a higher card even though", "card_2, card_3]: state = State( opening_suit, frozenset([card_1, card_2, card_3]), card_partner)", "range(4): card = None player_idx = (round_winner + player) %", "agent is declarer, they play their partner's cards if ba", "# For each round for _ in range(13): cards_played =", "card_played): self.weights[state][card_played] /= (1 + 0.1 * self.alpha ** self.game_num)", "points[0] + points[2] > points[1] + points[3] and points[2] >", "ba and train: ba.add_loss(agent_state, agent_card_played[0]) if declarer: ba.add_loss(agent_state_2, agent_card_played[1]) #", "def partner_win(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards): if (len(cards_played) ==", "card. round_winner = (determine_round_winner(opening_suit, cards_played) + round_winner) % 4 #", "f: for state in self.weights: f.write(\"State: suit {} | cards", "EXAMPLE state = State(1, set(23, 0)) cards_in_hand = set(0, 1,", "function counts the points in each hand. Note: Ace is", "card? self.higher_card(valid_cards, agent_valid_cards, card, cards_played, partners_cards) # 3 cards played", "agent wins a round when there's three cards played already", "* 13 and card > max_val: max_val, max_idx = card,", "# Get the winning card. round_winner = (determine_round_winner(opening_suit, cards_played) +", "import copy import datetime import numpy as np import random", "state.opening_suit # valid_cards = [20, 24] valid_cards = np.array([i for", "least one higher card than what's been played. \"\"\" def", "0 # used to count total wins in agent partnership", "\"\"\" def add_loss(self, state, card_played): self.weights[state][card_played] /= (1 + 0.1", "np.zeros(3) # Cumulative self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3) # Cumulative def clear_red_flags(self):", "= North's cards hands[1] = East's cards hands[2] = Agent's", "= 24 return valid_cards[index_into_valid_cards] \"\"\" This function write the policy", "is not 25, then the agent wins. We want to", "valid). NUM_GAMES_TRAIN = 10000 NUM_GAMES_TEST = 10000 STATS_PER = 1000", "played + partner has played highest card, does agent play", "-1 for idx, card in enumerate(cards_played): if suit * 13", "# Following the EXAMPLE: # suit = 1 suit =", "max_idx = card, idx return max_idx \"\"\" This function determines", "since a Diamond 13 and Diamond 14 will beat the", "\"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' REPRESENTATIONS OF BRIDGE Representing a \"Card\" as an integer:", "at that state. In this example, with state = (1,", "copy.copy(hands[2]) declarer, d = agent_declarer(hands) \"\"\" hands[0] = North's cards", "should play each of the 52 cards given that it", "states_accessed: g.write(\"State: suit {} | cards played {} | partner's", "'w') as k: k.write(\"game,\" \"agent_wins,random_wins,diff_wins,\" \"agent_rfv_a,agent_rftc_a,\" \"agent_rfv_b,agent_rftc_b,\" \"agent_rfv_c,agent_rftc_c,\" \"random_rfv_a,random_rftc_a,\" \"random_rfv_b,random_rftc_b,\"", "no cards have been played yet. 
Clubs is 0 Diamonds", "<-- used in a \"State\" where no cards have been", "has one. Representing the MDP with a Map from a", "if ba and declarer: agent_state_2 = State(opening_suit, frozenset(cards_played), agent_card_played[0]) states_accessed.append(agent_state_2)", "MDP with a Map from a \"State\" to an array", "13) if i in cards_in_hand] if len(valid_cards) == 0: return", "-> 51 are Spade 2 -> Spade 14 Jack is", "2 -> Heart 14 Cards 39 -> 51 are Spade", "frozenset(), -1)] = np.full(NUM_ACTIONS, 1.0) for opening_suit in range(4): for", "= 0.9999995 # 5,000,000 self.game_num = 1 \"\"\" EXAMPLE state", "f\"{rfv_random[0]},{rftc_random[0]},\" f\"{rfv_random[1]},{rftc_random[1]},\" f\"{rfv_random[2]},{rftc_random[2]},\" f\"\\n\") # Cumulative statistics on red flags", "cards_played, partners_cards): if (len(cards_played) == 3 and len(agent_valid_cards) > 1", "valid_cards, agent_valid_cards, card, cards_played, partners_cards): if (len(cards_played) == 3 and", "to incrase the proportion with which we play 24. ba.add_win(state,", "ba.weights: y = copy.deepcopy(ba.weights[x]) max = np.argmax(y) while max in", "is not declarer and agent should start the play return", "p = 0 for card in hand: if card %", "that state. In this example, with state = (1, set(23,", "Hearts is 2 Spades is 3 Representing a \"State\" as", "and Diamond 14 will beat the Diamond 12. '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" State", "= namedtuple('State', ['opening_suit', 'cards_played', 'partners_card']) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" DEFINE SOME", "partners_cards = copy.copy(hands[0]) agents_cards = copy.copy(hands[2]) declarer, d = agent_declarer(hands)", "who play randomly. \"\"\" def play_random_card(suit, cards_in_hand): if suit ==", "SINGLE GAME OF BRIDGE \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\" This function plays", "The agent must play a Diamond if it has Diamonds.", "return max_idx \"\"\" This function determines the declarer based on", "winner of the round. \"\"\" def determine_round_winner(suit, cards_played): max_idx =", "\".csv\", 'w') as k: k.write(\"game,\" \"agent_wins,random_wins,diff_wins,\" \"agent_rfv_a,agent_rftc_a,\" \"agent_rfv_b,agent_rftc_b,\" \"agent_rfv_c,agent_rftc_c,\" \"random_rfv_a,random_rftc_a,\"", "likely play 24, which beats 23 <-- hopefully 24 has", "with the most points. Return: (agent_is_declarer, declarer_idx) \"\"\" def agent_declarer(hands):", "to play a card and must play a Diamond if", "card or highest necessary card? self.higher_card(valid_cards, agent_valid_cards, card, cards_played, partners_cards)", "card played so far is higher than agent's highest card", "Representing a \"State\" as an opening suit and frozenset of", "red flags for every STATS_PER games. barf.clear_red_flags() barf_random.clear_red_flags() NS_Wins.append(0) NS_Wins_random.append(0)", "# for the last game, determine and write out policy", "each hand. Note: Ace is 12, 25, 38, 51 \"\"\"", "f.write(\"State: suit {} | cards played {} | partner's card", "\" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" NUM_ACTIONS = 52 # Agent can choose any", "def highest_card(self, valid_cards, agent_valid_cards, card): if len(agent_valid_cards) > 1 and", "cards hands[1] = East's cards hands[2] = Agent's cards hands[3]", "play highest card or highest necessary card? 
self.higher_card(valid_cards, agent_valid_cards, card,", "play_random_card(opening_suit, hands[player_idx]) agent_card_played[0] = card barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx,", "{} | cards played {} | partner's card {}\\nBest Card", "cards_played, partners_cards) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" PLAY A SINGLE GAME OF", "play_game(game=game, hands=copy.deepcopy(hands), train=True, ba=ba, barf=barf) NS_Wins_random[-1] += play_game(game=game, hands=hands, ba=None,", "0)) We have a Diamond 12 and Club 2 with", "card % 13 == 10: p += 2 elif card", "state = State(1, set(23, 0)) card_played = 24 If 4th", "This function checks if the agent wins a round when", "barf=None): partners_cards = copy.copy(hands[0]) agents_cards = copy.copy(hands[2]) declarer, d =", "main(): start_time = datetime.datetime.now() hands = [] # TRAINING print(f\"TRAINING", "This function determines the declarer based on partnership with the", "NS_Wins = 0 # used to count total wins in", "= BridgeAgentRedFlags() barf_random = BridgeAgentRedFlags() NS_Wins = [0] NS_Wins_random =", "is 14 Representing a \"Suit\" as an integer: n/a is", "add_loss(self, state, card_played): self.weights[state][card_played] /= (1 + 0.1 * self.alpha", "for player in range(4): card = None player_idx = (round_winner", "they play their partner's cards if ba and declarer: agent_state_2", "cards played already and the agent has at least one", "= np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT) average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.ALL_RED_FLAG_TOTAL_COUNT) print(f\"Average Win Delta", "11 Queen is 12 King is 13 Ace is 14", "elif card % 13 == 11: p += 3 elif", "play_game(game=game, hands=hands, ba=None, barf=barf_random) ba.game_num += 1 if (game +", "weight. # index_into_valid_counts = 1 since 20 has a smaller", "starts the game NS_Wins = 0 # used to count", "self.game_num) \"\"\" EXAMPLE state = State(1, set(23, 0)) card_played =", "and 25 since a Diamond 13 and Diamond 14 will", "self.partner_win(valid_cards, agent_valid_cards, card, cards_played, partners_cards) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" PLAY A", "agent play lowest card? do they beat their partner? self.partner_win(valid_cards,", "= cards_in_hand # Choose the valid card with highest weight.", "if declarer: ba.add_loss(agent_state_2, agent_card_played[1]) # for the last game, determine", "12 are Club 2 -> Club 14 Cards 13 ->", "a smaller weight than 24. # index_into_valid_cards = np.argmax(self.weights[state][valid_cards]) index_into_valid_cards", "of 1 NT bridge and outputs a winner. \"\"\" def", "card < (suit + 1) * 13 and card >", "must play a Diamond if it has one. Representing the", "winning card. round_winner = (determine_round_winner(opening_suit, cards_played) + round_winner) % 4", "\"\"\" EXAMPLE state = State(1, set(23, 0)) cards_in_hand = set(0,", "NS_Wins def game_summary(ba, t, iterations=NUM_GAMES_TRAIN): with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\" +", "4 # the person to the right of the declarer", "+ \"_Game_Data_Train-\" + str(t) + \".csv\", 'w') as k: k.write(\"game,\"", "on {NUM_GAMES_TEST} games\") game_summary(ba, False, iterations=NUM_GAMES_TEST) end_time = datetime.datetime.now() print(\"Runtime:", "% 4 # Adjust the BridgeAgent weights. # If the", "function determines the winner of the round. \"\"\" def determine_round_winner(suit,", "24. 
ba.add_loss(state, card_played) \"\"\" def add_loss(self, state, card_played): self.weights[state][card_played] /=", "their partner? self.partner_win(valid_cards, agent_valid_cards, card, cards_played, partners_cards) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \"", "beats 23 <-- hopefully 24 has the highest weight. card_played", "play 24, which beats 23 <-- hopefully 24 has the", "play randomly. \"\"\" def play_random_card(suit, cards_in_hand): if suit == -1:", "games. barf.clear_red_flags() barf_random.clear_red_flags() NS_Wins.append(0) NS_Wins_random.append(0) average_win_delta = (sum(NS_Wins)-sum(NS_Wins_random)) / ((len(NS_Wins)", "chosen. self.weights = {} self.weights[State(-1, frozenset(), -1)] = np.full(NUM_ACTIONS, 1.0)", "EXAMPLE state = State(1, set(23, 0)) # Diamond 12, Club", "even though their partner is guaranteed to win. \"\"\" def", "== 0: return random.choice(cards_in_hand) return random.choice(valid_cards) \"\"\" This function determines", "(1, set(23, 0)), weights[state] will likely have very large values", "randomly. \"\"\" def play_random_card(suit, cards_in_hand): if suit == -1: return", "the agent plays a higher card even though their partner", "card = ba.play_card(agent_state, hands[player_idx]) else: card = play_random_card(opening_suit, hands[player_idx]) agent_card_played[0]", "0.9995 # 10,000 # self.alpha = 0.99995 # 100,000 self.alpha", "FUNCTIONS \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\" This functions deals random cards. \"\"\"", "= 0.999995 # 1,000,000 # self.alpha = 0.9999995 # 5,000,000", "with which the agent should play each of the 52", "# Adjust the BridgeAgent weights. # If the BridgeAgent or", "0 Diamonds is 1 Hearts is 2 Spades is 3", "p += 2 elif card % 13 == 9: p", "round_winner == 2: if ba and train: ba.add_win(agent_state, agent_card_played[0]) if", "# We initialize all weights to 1 such that every", "[] # records which states have been updated for this", "card self.highest_card(valid_cards, agent_valid_cards, card) # 3 cards played and agent", "= game_summary(ba, True) # TESTING -- we don't change the", "deck[39:52]] \"\"\" This function is used by non-agents who play", "is higher than agent's highest card self.highest_card(valid_cards, agent_valid_cards, card) #", "Jack is 11 Queen is 12 King is 13 Ace", "valid_cards = cards_in_hand # Choose the valid card with highest", "starting from round_winner for player in range(4): card = None", "average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.ALL_RED_FLAG_TOTAL_COUNT) print(f\"Average Win Delta (want this to", "very large values at indices 24 and 25 since a", "\" PLAY A SINGLE GAME OF BRIDGE \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\"", "\"\"\" CS 238 Final Project: Bridge RL Agent <NAME> &", "collections import namedtuple \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' REPRESENTATIONS OF BRIDGE Representing a \"Card\"", "for opening_suit in range(4): for card_1 in range(52): for card_2", "for card_partner in [-1, card_1, card_2, card_3]: state = State(", "= 24 If 4th card is 25 (Diamond 14), then", "play_random_card(suit, cards_in_hand): if suit == -1: return random.choice(cards_in_hand) valid_cards =", "return # highest card played so far is higher than", "\"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" TRACKS PERFORMANCE OF BRIDGE AGENT \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\"", "NS_Wins = [0] NS_Wins_random = [0] 
for game in range(iterations):", "STATS_PER = 1000 \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" RL AGENT \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\"", "True, 2 # agent is not declarer and agent should", "their partner is guaranteed to win. \"\"\" def partner_win(self, valid_cards,", "agent plays their highest card even though the highest card", "Random bot plays card = play_random_card(opening_suit, hands[player_idx]) # Keep track", "= 52 # Agent can choose any card to play", "for i in range(suit * 13, (suit + 1) *", "self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1 if card == max(agent_valid_cards): self.RED_FLAG_VIOLATIONS[0] += 1", "agent_card_played[0]) if declarer: ba.add_win(agent_state_2, agent_card_played[1]) NS_Wins += 1 else: if", "are valid). NUM_GAMES_TRAIN = 10000 NUM_GAMES_TEST = 10000 STATS_PER =", "return points \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" TRACKS PERFORMANCE OF BRIDGE AGENT", "== -1: return random.choice(cards_in_hand) valid_cards = [i for i in", "def clear_red_flags(self): self.RED_FLAG_VIOLATIONS = np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3) \"\"\" This", "functions deals random cards. \"\"\" deck = list(range(52)) def shuffle_cards():", "opening_suit in range(4): for card_1 in range(52): for card_2 in", "there's three cards played already and the agent has at", "print(f\"{game + 1} / \", end=\"\", flush=True) rfv = barf.RED_FLAG_VIOLATIONS", "NUM_GAMES_TRAIN = 10000 NUM_GAMES_TEST = 10000 STATS_PER = 1000 \"\"\"'''''''''''''''''''''''''''''''''''''''''''''''''''''''''", "winner. \"\"\" def play_game(game, hands, train=False, ba=None, barf=None): partners_cards =", "barf=barf_random) ba.game_num += 1 if (game + 1) % STATS_PER", "= barf.RED_FLAG_TOTAL_COUNT rftc_random = barf_random.RED_FLAG_TOTAL_COUNT with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\" +", "25 since a Diamond 13 and Diamond 14 will beat", "state.partners_card, policy[count])) if state in states_accessed: g.write(\"State: suit {} |", "opening_suit, frozenset([card_1, card_2, card_3]), card_partner) self.weights[state] = np.full(NUM_ACTIONS, 1.0) #", "points = count_points(hands) # determines the number of points in", "def count_points(hands): points = [] for hand in hands: p", "self.RED_FLAG_TOTAL_COUNT = np.zeros(3) \"\"\" This function checks if the agent", "\"\"\" EXAMPLE state = State(1, set(23, 0)) # Diamond 12,", "card, cards_played, partners_cards) # 3 cards played + partner has", "Hand: {}\\n\\n\".format(cards_in_hand)) with open(filename + \".txt\", 'w') as f: for", "round_winner for player in range(4): card = None player_idx =", "ba.play_card(agent_state_2, hands[player_idx]) barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards) else: card", "class BridgeAgent: def __init__(self): # We initialize all weights to", "length-52 represets the proportion with which the agent should play", "PERFORMANCE OF BRIDGE AGENT \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" class BridgeAgentRedFlags: def __init__(self):", "Flag Ratios - Agent: {average_rf_ratios_agent}\") print(f\"Average Red Flag Ratios -", "Diamond 14 will beat the Diamond 12. 
<filename>bridge_RL_agent_v16.py
"""
CS 238 Final Project: Bridge RL Agent
<NAME> & <NAME>
"""
import copy
import datetime
import numpy as np
import random
from collections import namedtuple

"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
REPRESENTATIONS OF BRIDGE

Representing a "Card" as an integer:
    Cards 0  -> 12 are Club 2    -> Club 14
    Cards 13 -> 25 are Diamond 2 -> Diamond 14
    Cards 26 -> 38 are Heart 2   -> Heart 14
    Cards 39 -> 51 are Spade 2   -> Spade 14
    Jack is 11, Queen is 12, King is 13, Ace is 14

Representing a "Suit" as an integer:
    n/a is -1 <-- used in a "State" where no cards have been played yet.
    Clubs is 0, Diamonds is 1, Hearts is 2, Spades is 3

Representing a "State" as an opening suit and a frozenset of up to 3 "Card"s:
    state = State(1, frozenset({23, 0}))
    We have a Diamond 12 and a Club 2 with an opening suit of Diamonds.
    The agent is 3rd to play a card and must play a Diamond if it has one.

Representing the MDP with a Map from a "State" to an array of length 52:
    We call this Map "weights", and the length-52 array represents the
    proportion with which the agent should play each of the 52 cards given
    that it is in that state.
    In this example, with state = (1, {23, 0}), weights[state] will likely
    have very large values at indices 24 and 25, since a Diamond 13 and a
    Diamond 14 will beat the Diamond 12.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""

State = namedtuple('State', ['opening_suit', 'cards_played', 'partners_card'])
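# Worked example of the card encoding above (suit = card // 13, rank = card % 13 + 2):
#   card 0  -> suit 0 (Clubs),    rank 2  (Club 2)
#   card 23 -> suit 1 (Diamonds), rank 12 (Diamond Queen)
#   card 25 -> suit 1 (Diamonds), rank 14 (Diamond Ace)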
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" DEFINE SOME CONSTANTS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
NUM_ACTIONS = 52  # Agent can choose any card to play (only some are valid).
NUM_GAMES_TRAIN = 10000
NUM_GAMES_TEST = 10000
STATS_PER = 1000

"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" TRACKS PERFORMANCE OF BRIDGE AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgentRedFlags:
    def __init__(self):
        self.RED_FLAG_VIOLATIONS = np.zeros(3)
        self.RED_FLAG_TOTAL_COUNT = np.zeros(3)
        self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3)   # Cumulative
        self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3)  # Cumulative

    def clear_red_flags(self):
        self.RED_FLAG_VIOLATIONS = np.zeros(3)
        self.RED_FLAG_TOTAL_COUNT = np.zeros(3)

    """
    This function checks if the agent plays its highest card even though the
    highest card already played is higher than any card the agent holds.
    """
    def highest_card(self, valid_cards, agent_valid_cards, card):
        if len(agent_valid_cards) > 1 and max(valid_cards) > max(agent_valid_cards):
            self.RED_FLAG_TOTAL_COUNT[0] += 1
            self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1
            if card == max(agent_valid_cards):
                self.RED_FLAG_VIOLATIONS[0] += 1
                self.ALL_RED_FLAG_VIOLATIONS[0] += 1

    """
    This function checks if the agent wins the round when three cards have
    already been played and the agent holds at least one card higher than
    what has been played.
    """
    def higher_card(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards):
        if (len(cards_played) == 3 and len(agent_valid_cards) > 1
                and max(agent_valid_cards) > max(valid_cards)
                and max(valid_cards) not in partners_cards):
            self.RED_FLAG_TOTAL_COUNT[1] += 1
            self.ALL_RED_FLAG_TOTAL_COUNT[1] += 1
            if card < max(valid_cards):
                self.RED_FLAG_VIOLATIONS[1] += 1
                self.ALL_RED_FLAG_VIOLATIONS[1] += 1

    """
    This function checks if the agent plays a higher card even though its
    partner is guaranteed to win.
    """
    def partner_win(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards):
        if (len(cards_played) == 3 and len(agent_valid_cards) > 1
                and max(valid_cards) in partners_cards):
            self.RED_FLAG_TOTAL_COUNT[2] += 1
            self.ALL_RED_FLAG_TOTAL_COUNT[2] += 1
            if card > max(valid_cards):
                self.RED_FLAG_VIOLATIONS[2] += 1
                self.ALL_RED_FLAG_VIOLATIONS[2] += 1

    """
    This function checks for any red flags based on what the agent played.
    """
    def assess_card_played(self, hands, card, suit, cards_played, player_idx, partners_cards):
        all_valid_cards = list(range(suit * 13, (suit + 1) * 13))
        valid_cards = np.array([i for i in all_valid_cards if i in cards_played])
        agent_valid_cards = np.array([i for i in all_valid_cards if i in hands[player_idx]])
        if suit == -1:
            return
        # highest card played so far is higher than agent's highest card
        self.highest_card(valid_cards, agent_valid_cards, card)
        # 3 cards played and agent has higher cards: does it play its highest
        # card or the highest necessary card?
        self.higher_card(valid_cards, agent_valid_cards, card, cards_played, partners_cards)
        # 3 cards played + partner has played the highest card: does the agent
        # play its lowest card, or does it beat its partner?
        self.partner_win(valid_cards, agent_valid_cards, card, cards_played, partners_cards)

"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" RL AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgent:
    def __init__(self):
        # We initialize all weights to 1 such that every card has an equal
        # chance of being chosen.
        self.weights = {}
        self.weights[State(-1, frozenset(), -1)] = np.full(NUM_ACTIONS, 1.0)
        for opening_suit in range(4):
            for card_1 in range(52):
                for card_2 in range(card_1, 52):
                    for card_3 in range(card_2, 52):
                        for card_partner in [-1, card_1, card_2, card_3]:
                            state = State(
                                opening_suit,
                                frozenset([card_1, card_2, card_3]),
                                card_partner)
                            self.weights[state] = np.full(NUM_ACTIONS, 1.0)

        # self.alpha = 0.997      # 1,000
        # self.alpha = 0.9995     # 10,000
        # self.alpha = 0.99995    # 100,000
        self.alpha = 0.999995     # 1,000,000
        # self.alpha = 0.9999995  # 5,000,000
        self.game_num = 1

    """
    EXAMPLE
    state = State(1, frozenset({23, 0}))  # Diamond 12, Club 2 <-- first 2 cards in round
    card_played = 24                      # Diamond 13 <-- 3rd card in round
    If the 4th card is not 25, then the agent wins. We want to increase the
    proportion with which we play 24.
    ba.add_win(state, card_played)
    """
    def add_win(self, state, card_played):
        self.weights[state][card_played] *= (1 + 0.1 * self.alpha ** self.game_num)

    """
    EXAMPLE
    state = State(1, frozenset({23, 0}))  # Diamond 12, Club 2 <-- first 2 cards in round
    card_played = 24
    If the 4th card is 25 (Diamond 14), then the agent loses. We want to
    decrease the proportion with which we play 24.
    ba.add_loss(state, card_played)
    """
    def add_loss(self, state, card_played):
        self.weights[state][card_played] /= (1 + 0.1 * self.alpha ** self.game_num)
    """
    EXAMPLE
    state = State(1, frozenset({23, 0}))
    cards_in_hand = {0, 1, 4, 8, 11, 20, 24, 38}
    The agent chooses to play whichever remaining card has the highest weight.
    The agent must play a Diamond if it has Diamonds.
    In this example, the agent will most likely play 24, which beats 23 <--
    hopefully 24 has the highest weight.
    card_played = ba.play_card(state, cards_in_hand)
    """
    def play_card(self, state, cards_in_hand):
        # Following the EXAMPLE:
        # suit = 1
        suit = state.opening_suit
        # valid_cards = [20, 24]
        valid_cards = np.array([i for i in range(suit * 13, (suit + 1) * 13)
                                if i in cards_in_hand])
        if len(valid_cards) == 0:
            valid_cards = cards_in_hand
        # Choose the valid card with the highest weight.
        # index_into_valid_cards = 1 since 20 has a smaller weight than 24.
        # index_into_valid_cards = np.argmax(self.weights[state][valid_cards])
        index_into_valid_cards = np.random.choice(
            np.flatnonzero(self.weights[state][valid_cards] == self.weights[state][valid_cards].max()))
        # returns valid_cards[1] = 24
        return valid_cards[index_into_valid_cards]

    """
    This function writes the policy at the end of the training phase.
    """
    def write_policy(self, cards_in_hand, policy, filename, states_accessed):
        count = 0
        with open(filename + "_Last_Game.txt", 'w') as g:
            g.write("Cards in Hand: {}\n\n".format(cards_in_hand))
            with open(filename + ".txt", 'w') as f:
                for state in self.weights:
                    f.write("State: suit {} | cards played {} | partner's card {}\n"
                            "Best Card To Play: {}\n\n".format(
                                state.opening_suit, state.cards_played,
                                state.partners_card, policy[count]))
                    if state in states_accessed:
                        g.write("State: suit {} | cards played {} | partner's card {}\n"
                                "Best Card To Play: {}\n\n".format(
                                    state.opening_suit, state.cards_played,
                                    state.partners_card, policy[count]))
                    count += 1

"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" UTILITY FUNCTIONS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function deals random cards.
"""
deck = list(range(52))

def shuffle_cards():
    random.shuffle(deck)
    return [deck[0:13], deck[13:26], deck[26:39], deck[39:52]]

"""
This function is used by the non-agents, who play randomly.
"""
def play_random_card(suit, cards_in_hand):
    if suit == -1:
        return random.choice(cards_in_hand)
    valid_cards = [i for i in range(suit * 13, (suit + 1) * 13) if i in cards_in_hand]
    if len(valid_cards) == 0:
        return random.choice(cards_in_hand)
    return random.choice(valid_cards)

"""
This function determines the winner of the round.
"""
def determine_round_winner(suit, cards_played):
    max_idx = -1
    max_val = -1
    for idx, card in enumerate(cards_played):
        if suit * 13 <= card < (suit + 1) * 13 and card > max_val:
            max_val, max_idx = card, idx
    return max_idx

"""
This function determines the declarer based on the partnership with the most
points.
Return: (agent_is_declarer, declarer_idx)
"""
def agent_declarer(hands):
    points = count_points(hands)  # determines the number of points in each hand

    # agent's partnership has more points and agent is declarer
    if points[0] + points[2] > points[1] + points[3] and points[2] > points[0]:
        return True, 2

    # agent is not declarer and agent should start the play
    return False, -1

"""
This function counts the points in each hand.
Note: Ace is 12, 25, 38, 51
"""
def count_points(hands):
    points = []
    for hand in hands:
        p = 0
        for card in hand:
            if card % 13 == 12:
                p += 4
            elif card % 13 == 11:
                p += 3
            elif card % 13 == 10:
                p += 2
            elif card % 13 == 9:
                p += 1
        points.append(p)
    return points
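# Worked example for count_points (high-card points: Ace=4, King=3, Queen=2, Jack=1):
#   hand [12, 24, 30, 40]
#     card 12 (Club Ace)     -> 12 % 13 == 12 -> 4 points
#     card 24 (Diamond King) -> 24 % 13 == 11 -> 3 points
#     card 30 (Heart 6)      -> 30 % 13 == 4  -> 0 points
#     card 40 (Spade 3)      -> 40 % 13 == 1  -> 0 points
#   so count_points([[12, 24, 30, 40]]) returns [7].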
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" PLAY A SINGLE GAME OF BRIDGE
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function plays 13 rounds of 1 NT bridge and outputs a winner.
"""
def play_game(game, hands, train=False, ba=None, barf=None):
    partners_cards = copy.copy(hands[0])
    agents_cards = copy.copy(hands[2])
    declarer, d = agent_declarer(hands)
    """
    hands[0] = North's cards
    hands[1] = East's cards
    hands[2] = Agent's cards
    hands[3] = West's cards
    """
    round_winner = (d + 1) % 4  # the person to the right of the declarer starts the game
    NS_Wins = 0                 # used to count total wins in agent partnership
    states_accessed = []        # records which states have been updated for this game

    # For each round
    for _ in range(13):
        cards_played = []
        agent_card_played = [-1, -1]
        agent_state = None
        agent_state_2 = None
        opening_suit = -1

        # Each player plays a card in order starting from round_winner
        for player in range(4):
            card = None
            player_idx = (round_winner + player) % 4
            if player_idx == 2:
                # Agent plays
                if ba:
                    agent_state = State(opening_suit, frozenset(cards_played), agent_card_played[1])
                    states_accessed.append(agent_state)
                    card = ba.play_card(agent_state, hands[player_idx])
                else:
                    card = play_random_card(opening_suit, hands[player_idx])
                agent_card_played[0] = card
                barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)
            elif player_idx == 0:
                # if agent is declarer, they play their partner's cards
                if ba and declarer:
                    agent_state_2 = State(opening_suit, frozenset(cards_played), agent_card_played[0])
                    states_accessed.append(agent_state_2)
                    card = ba.play_card(agent_state_2, hands[player_idx])
                    barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)
                else:
                    card = play_random_card(opening_suit, hands[player_idx])
                agent_card_played[1] = card
            else:
                # Random bot plays
                card = play_random_card(opening_suit, hands[player_idx])

            # Keep track of the opening suit.
            if player == 0:
                opening_suit = card // 13

            hands[player_idx].remove(card)
            cards_played.append(card)

        # Get the winning card.
        round_winner = (determine_round_winner(opening_suit, cards_played) + round_winner) % 4

        # Adjust the BridgeAgent weights.
        # If the BridgeAgent or N wins.
        if round_winner == 0 or round_winner == 2:
            if ba and train:
                ba.add_win(agent_state, agent_card_played[0])
                if declarer:
                    ba.add_win(agent_state_2, agent_card_played[1])
            NS_Wins += 1
        else:
            if ba and train:
                ba.add_loss(agent_state, agent_card_played[0])
                if declarer:
                    ba.add_loss(agent_state_2, agent_card_played[1])

    # for the last game, determine and write out policy
    if ba and game == (NUM_GAMES_TRAIN - 1):
        policy = []
        count = 0
        for x in ba.weights:
            y = copy.deepcopy(ba.weights[x])
            max = np.argmax(y)
            while max in x.cards_played:
                y[max] = -1
                max = np.argmax(y)
            policy.append(max)
            count += 1
        game_file = "Bridge_" + str(game + 1)
        ba.write_policy(agents_cards, policy, game_file, states_accessed)

    return NS_Wins

def game_summary(ba, t, iterations=NUM_GAMES_TRAIN):
    with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Train-" + str(t) + ".csv", 'w') as k:
        k.write("game,"
                "agent_wins,random_wins,diff_wins,"
                "agent_rfv_a,agent_rftc_a,"
                "agent_rfv_b,agent_rftc_b,"
                "agent_rfv_c,agent_rftc_c,"
                "random_rfv_a,random_rftc_a,"
                "random_rfv_b,random_rftc_b,"
                "random_rfv_c,random_rftc_c\n")

    barf = BridgeAgentRedFlags()
    barf_random = BridgeAgentRedFlags()
    NS_Wins = [0]
    NS_Wins_random = [0]

    for game in range(iterations):
        hands = shuffle_cards()
        NS_Wins[-1] += play_game(game=game, hands=copy.deepcopy(hands), train=True, ba=ba, barf=barf)
        NS_Wins_random[-1] += play_game(game=game, hands=hands, ba=None, barf=barf_random)
        ba.game_num += 1

        if (game + 1) % STATS_PER == 0:
            print(f"{game + 1} / ", end="", flush=True)
            rfv = barf.RED_FLAG_VIOLATIONS
            rfv_random = barf_random.RED_FLAG_VIOLATIONS
            rftc = barf.RED_FLAG_TOTAL_COUNT
            rftc_random = barf_random.RED_FLAG_TOTAL_COUNT
            with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Train-" + str(t) + ".csv", 'a') as k:
                k.write(
                    f"{game + 1},"
                    f"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] - NS_Wins_random[-1]},"
                    f"{rfv[0]},{rftc[0]},"
                    f"{rfv[1]},{rftc[1]},"
                    f"{rfv[2]},{rftc[2]},"
                    f"{rfv_random[0]},{rftc_random[0]},"
                    f"{rfv_random[1]},{rftc_random[1]},"
                    f"{rfv_random[2]},{rftc_random[2]},"
                    f"\n")
            # Cumulative statistics on red flags for every STATS_PER games.
            barf.clear_red_flags()
            barf_random.clear_red_flags()
            NS_Wins.append(0)
            NS_Wins_random.append(0)

    average_win_delta = (sum(NS_Wins) - sum(NS_Wins_random)) / ((len(NS_Wins) - 1) * STATS_PER)
    average_rf_ratios_agent = np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT)
    average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.ALL_RED_FLAG_TOTAL_COUNT)
    print(f"Average Win Delta (want this to be positive): {average_win_delta}")
    print(f"Average Red Flag Ratios - Agent: {average_rf_ratios_agent}")
    print(f"Average Red Flag Ratios - Random: {average_rf_ratios_random}")
    with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Avg_Train-" + str(t) + ".csv", 'w') as m:
        m.write(f"avg_win_delta,avg_rf_agent,avg_rf_random\n"
                f"{average_win_delta},{average_rf_ratios_agent},{average_rf_ratios_random}\n")
    return ba

def main():
    start_time = datetime.datetime.now()
    hands = []

    # TRAINING
    print(f"TRAINING on {NUM_GAMES_TRAIN} games")
    ba = BridgeAgent()
    ba = game_summary(ba, True)

    # TESTING -- we don't change the weights here
    print(f"TESTING on {NUM_GAMES_TEST} games")
    game_summary(ba, False, iterations=NUM_GAMES_TEST)

    end_time = datetime.datetime.now()
    print("Runtime: ", end_time - start_time)  # runtime

if __name__ == "__main__":
    main()
\"\"\" deck =", "# Agent can choose any card to play (only some", "np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT) average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.ALL_RED_FLAG_TOTAL_COUNT) print(f\"Average Win Delta (want", "\"State\" as an opening suit and frozenset of up to", "agent plays a higher card even though their partner is", "agent_card_played[1]) NS_Wins += 1 else: if ba and train: ba.add_loss(agent_state,", "% 13 == 11: p += 3 elif card %", "the declarer starts the game NS_Wins = 0 # used", "% 4 # the person to the right of the", "0)) card_played = 24 If 4th card is 25 (Diamond", "0 or round_winner == 2: if ba and train: ba.add_win(agent_state,", "== 10: p += 2 elif card % 13 ==", "a winner. \"\"\" def play_game(game, hands, train=False, ba=None, barf=None): partners_cards", "\"State\" to an array of length-52: We call this Map", "OF BRIDGE \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\" This function plays 13 rounds", "card_partner) self.weights[state] = np.full(NUM_ACTIONS, 1.0) # self.alpha = 0.997 #", "play a Diamond if it has Diamonds. In this example,", "for card_1 in range(52): for card_2 in range(card_1, 52): for", "has higher cards, does it play highest card or highest", "than what's been played. \"\"\" def higher_card(self, valid_cards, agent_valid_cards, card,", "deck[26:39], deck[39:52]] \"\"\" This function is used by non-agents who", "card {}\\nBest Card To Play: {}\\n\\n\".format(state.opening_suit, state.cards_played, state.partners_card, policy[count])) if", "2 with an opening suit of Diamonds. The agent is", "wins in agent partnership states_accessed = [] # records which", "2: if ba and train: ba.add_win(agent_state, agent_card_played[0]) if declarer: ba.add_win(agent_state_2,", "order starting from round_winner for player in range(4): card =", "a \"State\" as an opening suit and frozenset of up", "in ba.weights: y = copy.deepcopy(ba.weights[x]) max = np.argmax(y) while max", "{}\\n\\n\".format(state.opening_suit, state.cards_played, state.partners_card, policy[count])) count += 1 \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \"", "- NS_Wins_random[-1]},\" f\"{rfv[0]},{rftc[0]},\" f\"{rfv[1]},{rftc[1]},\" f\"{rfv[2]},{rftc[2]},\" f\"{rfv_random[0]},{rftc_random[0]},\" f\"{rfv_random[1]},{rftc_random[1]},\" f\"{rfv_random[2]},{rftc_random[2]},\" f\"\\n\") #", "- Random: {average_rf_ratios_random}\") with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Avg_Train-\" + str(t) +", "most likely play 24, which beats 23 <-- hopefully 24", "card barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards) elif player_idx ==", "plays their highest card even though the highest card already", "state = (1, set(23, 0)), weights[state] will likely have very", "1 Hearts is 2 Spades is 3 Representing a \"State\"", "agent should play each of the 52 cards given that", "# determines the number of points in each hand #", "len(agent_valid_cards) > 1 and max(valid_cards) > max(agent_valid_cards): self.RED_FLAG_TOTAL_COUNT[0] += 1", "is declarer, they play their partner's cards if ba and", "3 cards played + partner has played highest card, does", "{NUM_GAMES_TEST} games\") game_summary(ba, False, iterations=NUM_GAMES_TEST) end_time = datetime.datetime.now() print(\"Runtime: \",", "np.argmax(y) while max in x.cards_played: y[max] = -1 max =", "opening suit and frozenset of up to 3 \"Card\"-s: state", "is 2 Spades is 3 Representing a \"State\" as an", "RL 
AGENT \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" class BridgeAgent: def __init__(self): # We", "1) * 13) if i in cards_in_hand] if len(valid_cards) ==", "clear_red_flags(self): self.RED_FLAG_VIOLATIONS = np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3) \"\"\" This function", "1, 4, 8, 11, 20, 24, 38) The agent choose", "if ba and train: ba.add_loss(agent_state, agent_card_played[0]) if declarer: ba.add_loss(agent_state_2, agent_card_played[1])", "% 13 == 9: p += 1 points.append(p) return points", "24. # index_into_valid_cards = np.argmax(self.weights[state][valid_cards]) index_into_valid_cards = np.random.choice(np.flatnonzero(self.weights[state][valid_cards] == self.weights[state][valid_cards].max()))", "-1 <-- used in a \"State\" where no cards have", "\"\"\" This function checks if the agent wins a round", "and the agent has at least one higher card than", "which states have been updated for this game # For", "records which states have been updated for this game #", "NS_Wins.append(0) NS_Wins_random.append(0) average_win_delta = (sum(NS_Wins)-sum(NS_Wins_random)) / ((len(NS_Wins) - 1) *", "= agent_declarer(hands) \"\"\" hands[0] = North's cards hands[1] = East's", "= ba.play_card(agent_state_2, hands[player_idx]) barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards) else:", "np.zeros(3) self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3) # Cumulative self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3) #", "1},\" f\"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] - NS_Wins_random[-1]},\" f\"{rfv[0]},{rftc[0]},\" f\"{rfv[1]},{rftc[1]},\" f\"{rfv[2]},{rftc[2]},\" f\"{rfv_random[0]},{rftc_random[0]},\" f\"{rfv_random[1]},{rftc_random[1]},\" f\"{rfv_random[2]},{rftc_random[2]},\"", "the proportion with which we play 24. ba.add_win(state, card_played) \"\"\"", "4 # Adjust the BridgeAgent weights. # If the BridgeAgent", "play a Diamond if it has one. Representing the MDP", "choose to play whichever remaining card has the highest weight.", "in order starting from round_winner for player in range(4): card", "self.RED_FLAG_TOTAL_COUNT[0] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1 if card == max(agent_valid_cards):", "hands[2] = Agent's cards hands[3] = West's cards \"\"\" round_winner", "from round_winner for player in range(4): card = None player_idx", "opening suit. if player == 0: opening_suit = card //", "Red Flag Ratios - Agent: {average_rf_ratios_agent}\") print(f\"Average Red Flag Ratios", "open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Avg_Train-\" + str(t) + \".csv\", 'w') as m:", "played so far is higher than agent's highest card self.highest_card(valid_cards,", "def shuffle_cards(): random.shuffle(deck) return [deck[0:13], deck[13:26], deck[26:39], deck[39:52]] \"\"\" This", "STATS_PER games. barf.clear_red_flags() barf_random.clear_red_flags() NS_Wins.append(0) NS_Wins_random.append(0) average_win_delta = (sum(NS_Wins)-sum(NS_Wins_random)) /", "(only some are valid). 
NUM_GAMES_TRAIN = 10000 NUM_GAMES_TEST = 10000", "# if agent is declarer, they play their partner's cards", "cards_in_hand] if len(valid_cards) == 0: return random.choice(cards_in_hand) return random.choice(valid_cards) \"\"\"", "== 12: p += 4 elif card % 13 ==", "Representing a \"Card\" as an integer: Cards 0 -> 12", "\"Suit\" as an integer: n/a is -1 <-- used in", "True) # TESTING -- we don't change the weights here", "BRIDGE Representing a \"Card\" as an integer: Cards 0 ->", "round_winner = (d + 1) % 4 # the person", "the number of points in each hand # agent's partnership", "= np.full(NUM_ACTIONS, 1.0) for opening_suit in range(4): for card_1 in", "+ 1},\" f\"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] - NS_Wins_random[-1]},\" f\"{rfv[0]},{rftc[0]},\" f\"{rfv[1]},{rftc[1]},\" f\"{rfv[2]},{rftc[2]},\" f\"{rfv_random[0]},{rftc_random[0]},\" f\"{rfv_random[1]},{rftc_random[1]},\"", "card in hand: if card % 13 == 12: p", "Cumulative self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3) # Cumulative def clear_red_flags(self): self.RED_FLAG_VIOLATIONS =", "valid_cards[index_into_valid_cards] \"\"\" This function write the policy at the end", "all_valid_cards = list(range(suit * 13, (suit + 1) * 13))", "return random.choice(valid_cards) \"\"\" This function determines the winner of the", "played already and the agent has at least one higher", "NUM_GAMES_TEST = 10000 STATS_PER = 1000 \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" RL", "14 Cards 26 -> 38 are Heart 2 -> Heart", "barf_random.ALL_RED_FLAG_TOTAL_COUNT) print(f\"Average Win Delta (want this to be positive): {average_win_delta}\")", "State(1, set(23, 0)) card_played = 24 If 4th card is", "-> 12 are Club 2 -> Club 14 Cards 13", "North's cards hands[1] = East's cards hands[2] = Agent's cards", "ba.add_win(agent_state_2, agent_card_played[1]) NS_Wins += 1 else: if ba and train:", "want to incrase the proportion with which we play 24.", "This functions deals random cards. 
\"\"\" deck = list(range(52)) def", "if i in cards_in_hand] if len(valid_cards) == 0: return random.choice(cards_in_hand)", "play_random_card(opening_suit, hands[player_idx]) agent_card_played[1] = card else: # Random bot plays", "+= 1 self.ALL_RED_FLAG_VIOLATIONS[2] += 1 \"\"\" This function checks for", "+= 1 \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" UTILITY FUNCTIONS \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\"", "1) ba.write_policy(agents_cards, policy, game_file, states_accessed) return NS_Wins def game_summary(ba, t,", "1) * 13)) valid_cards = np.array([i for i in all_valid_cards", "\"\"\" def play_card(self, state, cards_in_hand): # Following the EXAMPLE: #", "agent's partnership has more points and agent is declarer if", "\" \" UTILITY FUNCTIONS \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\" This functions deals", "an opening suit and frozenset of up to 3 \"Card\"-s:", "+ str(t) + \".csv\", 'w') as m: m.write(f\"avg_win_delta,avg_rf_agent,avg_rf_random\\n\" f\"{average_win_delta},{average_rf_ratios_agent},{average_rf_ratios_random}\\n\") return", "if (len(cards_played) == 3 and len(agent_valid_cards) > 1 and max(valid_cards)", "list(range(suit * 13, (suit + 1) * 13)) valid_cards =", "partner has played highest card, does agent play lowest card?", "points.append(p) return points \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" TRACKS PERFORMANCE OF BRIDGE", "player_idx, partners_cards) elif player_idx == 0: # if agent is", "Cards 26 -> 38 are Heart 2 -> Heart 14", "open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\" + str(t) + \".csv\", 'a') as k:", "= [20, 24] valid_cards = np.array([i for i in range(suit", "+= 4 elif card % 13 == 11: p +=", "BridgeAgentRedFlags: def __init__(self): self.RED_FLAG_VIOLATIONS = np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3) self.ALL_RED_FLAG_VIOLATIONS", "= np.zeros(3) \"\"\" This function checks if the agent plays", "player_idx == 0: # if agent is declarer, they play", "card is 25 (Diamond 14), then the agent loses. We", "rftc_random = barf_random.RED_FLAG_TOTAL_COUNT with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\" + str(t) +", "= -1 # Each player plays a card in order", "returns valid_cards[1] = 24 return valid_cards[index_into_valid_cards] \"\"\" This function write", "card_1 in range(52): for card_2 in range(card_1, 52): for card_3", "1 and max(agent_valid_cards) > max(valid_cards) and max(valid_cards) not in partners_cards", "agent's highest card self.highest_card(valid_cards, agent_valid_cards, card) # 3 cards played", "= copy.copy(hands[0]) agents_cards = copy.copy(hands[2]) declarer, d = agent_declarer(hands) \"\"\"", "hands[player_idx]) # Keep track of the opening suit. if player", "for card_2 in range(card_1, 52): for card_3 in range(card_2, 52):", "import random from collections import namedtuple \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' REPRESENTATIONS OF BRIDGE", "{NUM_GAMES_TRAIN} games\") ba = BridgeAgent() ba = game_summary(ba, True) #", "range(card_1, 52): for card_3 in range(card_2, 52): for card_partner in", "else: card = play_random_card(opening_suit, hands[player_idx]) agent_card_played[1] = card else: #", "1 such that every card has an equal chance of", "1 self.ALL_RED_FLAG_TOTAL_COUNT[2] += 1 if card > max(valid_cards): self.RED_FLAG_VIOLATIONS[2] +=", "% 13 == 10: p += 2 elif card %", "played is higher than theirs. 
\"\"\" def highest_card(self, valid_cards, agent_valid_cards,", "agent_state = None agent_state_2 = None opening_suit = -1 #", "a higher card even though their partner is guaranteed to", "if ba: agent_state = State(opening_suit, frozenset(cards_played), agent_card_played[1]) states_accessed.append(agent_state) card =", "+ 1) % STATS_PER == 0: print(f\"{game + 1} /", "max_val = -1 for idx, card in enumerate(cards_played): if suit", "have been updated for this game # For each round", "end of the data training phase. \"\"\" def write_policy(self, cards_in_hand,", "= (1, set(23, 0)), weights[state] will likely have very large", "count = 0 for x in ba.weights: y = copy.deepcopy(ba.weights[x])", "Following the EXAMPLE: # suit = 1 suit = state.opening_suit", "a Diamond if it has one. Representing the MDP with", "declarer_idx) \"\"\" def agent_declarer(hands): points = count_points(hands) # determines the", "card even though the highest card already played is higher", "hands=hands, ba=None, barf=barf_random) ba.game_num += 1 if (game + 1)", "Diamond if it has one. Representing the MDP with a", "card than what's been played. \"\"\" def higher_card(self, valid_cards, agent_valid_cards,", "\"\"\" def highest_card(self, valid_cards, agent_valid_cards, card): if len(agent_valid_cards) > 1", "> max(valid_cards) and max(valid_cards) not in partners_cards ): self.RED_FLAG_TOTAL_COUNT[1] +=", "hands[player_idx]]) if suit == -1: return # highest card played", "== 11: p += 3 elif card % 13 ==", "self.ALL_RED_FLAG_TOTAL_COUNT[1] += 1 if card < max(valid_cards): self.RED_FLAG_VIOLATIONS[1] += 1", "state = State(1, set(23, 0)) cards_in_hand = set(0, 1, 4,", "= (round_winner + player) % 4 if player_idx == 2:", "agent_declarer(hands): points = count_points(hands) # determines the number of points", "partner? self.partner_win(valid_cards, agent_valid_cards, card, cards_played, partners_cards) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" PLAY", "if card < max(valid_cards): self.RED_FLAG_VIOLATIONS[1] += 1 self.ALL_RED_FLAG_VIOLATIONS[1] += 1", "policy if ba and game == (NUM_GAMES_TRAIN - 1): policy", "/ \", end=\"\", flush=True) rfv = barf.RED_FLAG_VIOLATIONS rfv_random = barf_random.RED_FLAG_VIOLATIONS", "k: k.write( f\"{game + 1},\" f\"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] - NS_Wins_random[-1]},\" f\"{rfv[0]},{rftc[0]},\" f\"{rfv[1]},{rftc[1]},\"", "rftc = barf.RED_FLAG_TOTAL_COUNT rftc_random = barf_random.RED_FLAG_TOTAL_COUNT with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\"", "example, agent will most likely play 24, which beats 23", "points in each hand # agent's partnership has more points", "higher card than what's been played. \"\"\" def higher_card(self, valid_cards,", "BridgeAgent weights. # If the BridgeAgent or N wins. if", "agent is 3rd to play a card and must play", "with a Map from a \"State\" to an array of", "agent should start the play return False, -1 \"\"\" This", "as an integer: n/a is -1 <-- used in a", "updated for this game # For each round for _", "cards if ba and declarer: agent_state_2 = State(opening_suit, frozenset(cards_played), agent_card_played[0])", "range(4): for card_1 in range(52): for card_2 in range(card_1, 52):", "): self.RED_FLAG_TOTAL_COUNT[1] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[1] += 1 if card <", "with state = (1, set(23, 0)), weights[state] will likely have", "has the highest weight. 
card_played = ba.play_card(state, cards_in_hand) \"\"\" def", "-1: return random.choice(cards_in_hand) valid_cards = [i for i in range(suit", "\"\"\" This function counts the points in each hand. Note:", "cards_in_hand) \"\"\" def play_card(self, state, cards_in_hand): # Following the EXAMPLE:", "np.array([i for i in range(suit * 13, (suit + 1)", "<= card < (suit + 1) * 13 and card", "12: p += 4 elif card % 13 == 11:", "+ str(game + 1) ba.write_policy(agents_cards, policy, game_file, states_accessed) return NS_Wins", "1 \"\"\" This function checks if the agent wins a", "+ \".txt\", 'w') as f: for state in self.weights: f.write(\"State:", "'w') as f: for state in self.weights: f.write(\"State: suit {}", "a \"Card\" as an integer: Cards 0 -> 12 are", "it is at that state. In this example, with state", "suit = 1 suit = state.opening_suit # valid_cards = [20,", "10,000 # self.alpha = 0.99995 # 100,000 self.alpha = 0.999995", "238 Final Project: Bridge RL Agent <NAME> & <NAME> \"\"\"", "the winning card. round_winner = (determine_round_winner(opening_suit, cards_played) + round_winner) %", "hands[player_idx]) barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards) else: card =", "Play: {}\\n\\n\".format(state.opening_suit, state.cards_played, state.partners_card, policy[count])) count += 1 \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \"", "{average_win_delta}\") print(f\"Average Red Flag Ratios - Agent: {average_rf_ratios_agent}\") print(f\"Average Red", "= [] count = 0 for x in ba.weights: y", "total wins in agent partnership states_accessed = [] # records", "1) * 13) if i in cards_in_hand]) if len(valid_cards) ==", "self.RED_FLAG_VIOLATIONS[0] += 1 self.ALL_RED_FLAG_VIOLATIONS[0] += 1 \"\"\" This function checks", "deck = list(range(52)) def shuffle_cards(): random.shuffle(deck) return [deck[0:13], deck[13:26], deck[26:39],", "= (determine_round_winner(opening_suit, cards_played) + round_winner) % 4 # Adjust the", "(suit + 1) * 13) if i in cards_in_hand]) if", "we don't change the weights here print(f\"TESTING on {NUM_GAMES_TEST} games\")", "% STATS_PER == 0: print(f\"{game + 1} / \", end=\"\",", "are Diamond 2 -> Diamond 14 Cards 26 -> 38", "to 3 \"Card\"-s: state = State(1, frozenset(23, 0)) We have", "max_idx = -1 max_val = -1 for idx, card in", "(Diamond 14), then the agent loses. We want to decrease", "to play whichever remaining card has the highest weight. The", "= np.zeros(3) self.RED_FLAG_TOTAL_COUNT = np.zeros(3) self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3) # Cumulative", "loses. We want to decrease the proportion with which we", "+= play_game(game=game, hands=hands, ba=None, barf=barf_random) ba.game_num += 1 if (game", "Diamond 13 <-- 3rd card in round If 4th card", "highest card, does agent play lowest card? do they beat", "points in each hand. Note: Ace is 12, 25, 38,", "what's been played. \"\"\" def higher_card(self, valid_cards, agent_valid_cards, card, cards_played,", "= np.zeros(3) # Cumulative self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3) # Cumulative def", "the highest card already played is higher than theirs. 
\"\"\"", "if points[0] + points[2] > points[1] + points[3] and points[2]", "in states_accessed: g.write(\"State: suit {} | cards played {} |", "g.write(\"Cards in Hand: {}\\n\\n\".format(cards_in_hand)) with open(filename + \".txt\", 'w') as", "valid_cards[1] = 24 return valid_cards[index_into_valid_cards] \"\"\" This function write the", "# returns valid_cards[1] = 24 return valid_cards[index_into_valid_cards] \"\"\" This function", "< max(valid_cards): self.RED_FLAG_VIOLATIONS[1] += 1 self.ALL_RED_FLAG_VIOLATIONS[1] += 1 \"\"\" This", "player) % 4 if player_idx == 2: # Agent plays", "chance of being chosen. self.weights = {} self.weights[State(-1, frozenset(), -1)]", "determines the number of points in each hand # agent's", "and game == (NUM_GAMES_TRAIN - 1): policy = [] count", "in range(card_2, 52): for card_partner in [-1, card_1, card_2, card_3]:", "(agent_is_declarer, declarer_idx) \"\"\" def agent_declarer(hands): points = count_points(hands) # determines", "function checks if the agent plays their highest card even", "in all_valid_cards if i in hands[player_idx]]) if suit == -1:", "And the array of length-52 represets the proportion with which", "BRIDGE AGENT \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" class BridgeAgentRedFlags: def __init__(self): self.RED_FLAG_VIOLATIONS =", "weight. The agent must play a Diamond if it has", "NS_Wins_random[-1] += play_game(game=game, hands=hands, ba=None, barf=barf_random) ba.game_num += 1 if", "\" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\" This functions deals random cards. \"\"\" deck", "proportion with which the agent should play each of the", "state in self.weights: f.write(\"State: suit {} | cards played {}", "already played is higher than theirs. \"\"\" def highest_card(self, valid_cards,", "do they beat their partner? self.partner_win(valid_cards, agent_valid_cards, card, cards_played, partners_cards)", "= [-1, -1] agent_state = None agent_state_2 = None opening_suit", "+= 1 game_file = \"Bridge_\" + str(game + 1) ba.write_policy(agents_cards,", "if the agent plays their highest card even though the", "# valid_cards = [20, 24] valid_cards = np.array([i for i", "agent_declarer(hands) \"\"\" hands[0] = North's cards hands[1] = East's cards", "25 are Diamond 2 -> Diamond 14 Cards 26 ->", "+ 1) * 13) if i in cards_in_hand] if len(valid_cards)", "and max(valid_cards) > max(agent_valid_cards): self.RED_FLAG_TOTAL_COUNT[0] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1", "max(valid_cards) > max(agent_valid_cards): self.RED_FLAG_TOTAL_COUNT[0] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1 if", "range(suit * 13, (suit + 1) * 13) if i", "ba=None, barf=None): partners_cards = copy.copy(hands[0]) agents_cards = copy.copy(hands[2]) declarer, d", "the points in each hand. 
Note: Ace is 12, 25,", "max in x.cards_played: y[max] = -1 max = np.argmax(y) policy.append(max)", "from collections import namedtuple \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' REPRESENTATIONS OF BRIDGE Representing a", "Spades is 3 Representing a \"State\" as an opening suit", "'cards_played', 'partners_card']) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" DEFINE SOME CONSTANTS \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\"", "card = ba.play_card(agent_state_2, hands[player_idx]) barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)", "import namedtuple \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' REPRESENTATIONS OF BRIDGE Representing a \"Card\" as", "hands[player_idx]) agent_card_played[1] = card else: # Random bot plays card", "and agent should start the play return False, -1 \"\"\"", "player_idx = (round_winner + player) % 4 if player_idx ==", "first 2 cards in round card_played = 24 # Diamond", "of the data training phase. \"\"\" def write_policy(self, cards_in_hand, policy,", "all_valid_cards if i in cards_played]) agent_valid_cards = np.array([i for i", "'''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" \"\"\" This function plays 13 rounds of 1 NT", "self.game_num = 1 \"\"\" EXAMPLE state = State(1, set(23, 0))", "data training phase. \"\"\" def write_policy(self, cards_in_hand, policy, filename, states_accessed):", "agent is declarer if points[0] + points[2] > points[1] +", "[deck[0:13], deck[13:26], deck[26:39], deck[39:52]] \"\"\" This function is used by", "open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Train-\" + str(t) + \".csv\", 'w') as k:", "51 \"\"\" def count_points(hands): points = [] for hand in", "agent_valid_cards, card, cards_played, partners_cards) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" PLAY A SINGLE", "Diamond 12. '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" State = namedtuple('State', ['opening_suit', 'cards_played', 'partners_card']) \"\"\"'''''''''''''''''''''''''''''''''''''''''''''''''''''''''", "max_val, max_idx = card, idx return max_idx \"\"\" This function", "# used to count total wins in agent partnership states_accessed", "ba and declarer: agent_state_2 = State(opening_suit, frozenset(cards_played), agent_card_played[0]) states_accessed.append(agent_state_2) card", "Cards 39 -> 51 are Spade 2 -> Spade 14", "play (only some are valid). NUM_GAMES_TRAIN = 10000 NUM_GAMES_TEST =", "= barf_random.RED_FLAG_VIOLATIONS rftc = barf.RED_FLAG_TOTAL_COUNT rftc_random = barf_random.RED_FLAG_TOTAL_COUNT with open(str(NUM_GAMES_TRAIN)", "are Spade 2 -> Spade 14 Jack is 11 Queen", "state. 
In this example, with state = (1, set(23, 0)),", "hands: p = 0 for card in hand: if card", "Ace is 12, 25, 38, 51 \"\"\" def count_points(hands): points", "cards_played, partners_cards) # 3 cards played + partner has played", "= BridgeAgentRedFlags() NS_Wins = [0] NS_Wins_random = [0] for game", "card_partner in [-1, card_1, card_2, card_3]: state = State( opening_suit,", "= play_random_card(opening_suit, hands[player_idx]) agent_card_played[0] = card barf.assess_card_played(hands, card, opening_suit, cards_played,", "train: ba.add_loss(agent_state, agent_card_played[0]) if declarer: ba.add_loss(agent_state_2, agent_card_played[1]) # for the", "\"_Last_Game.txt\", 'w') as g: g.write(\"Cards in Hand: {}\\n\\n\".format(cards_in_hand)) with open(filename", "If 4th card is 25 (Diamond 14), then the agent", "\"\"\" import copy import datetime import numpy as np import", "+= 1 \"\"\" This function checks if the agent wins", "Agent <NAME> & <NAME> \"\"\" import copy import datetime import", "Bridge RL Agent <NAME> & <NAME> \"\"\" import copy import", "m: m.write(f\"avg_win_delta,avg_rf_agent,avg_rf_random\\n\" f\"{average_win_delta},{average_rf_ratios_agent},{average_rf_ratios_random}\\n\") return ba def main(): start_time = datetime.datetime.now()", "= state.opening_suit # valid_cards = [20, 24] valid_cards = np.array([i", "= copy.deepcopy(ba.weights[x]) max = np.argmax(y) while max in x.cards_played: y[max]", "i in all_valid_cards if i in cards_played]) agent_valid_cards = np.array([i", "np.argmax(self.weights[state][valid_cards]) index_into_valid_cards = np.random.choice(np.flatnonzero(self.weights[state][valid_cards] == self.weights[state][valid_cards].max())) # returns valid_cards[1] =", "max(agent_valid_cards): self.RED_FLAG_VIOLATIONS[0] += 1 self.ALL_RED_FLAG_VIOLATIONS[0] += 1 \"\"\" This function", "\"weights\". And the array of length-52 represets the proportion with", "AGENT \" '''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" class BridgeAgentRedFlags: def __init__(self): self.RED_FLAG_VIOLATIONS = np.zeros(3)", "card < max(valid_cards): self.RED_FLAG_VIOLATIONS[1] += 1 self.ALL_RED_FLAG_VIOLATIONS[1] += 1 \"\"\"", "as an opening suit and frozenset of up to 3", "len(agent_valid_cards) > 1 and max(valid_cards) in partners_cards ): self.RED_FLAG_TOTAL_COUNT[2] +=", "wins a round when there's three cards played already and", "is 0 Diamonds is 1 Hearts is 2 Spades is", "1.0) # self.alpha = 0.997 # 1,000 # self.alpha =", "suit. if player == 0: opening_suit = card // 13", "change the weights here print(f\"TESTING on {NUM_GAMES_TEST} games\") game_summary(ba, False,", "0 for x in ba.weights: y = copy.deepcopy(ba.weights[x]) max =", "partners_cards) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \" \" PLAY A SINGLE GAME OF BRIDGE", "card = play_random_card(opening_suit, hands[player_idx]) # Keep track of the opening", "card with highest weight. # index_into_valid_counts = 1 since 20", "We want to incrase the proportion with which we play", "ba and game == (NUM_GAMES_TRAIN - 1): policy = []", "play each of the 52 cards given that it is", "points[2] > points[1] + points[3] and points[2] > points[0]: return", "= 0 with open(filename + \"_Last_Game.txt\", 'w') as g: g.write(\"Cards", "and train: ba.add_loss(agent_state, agent_card_played[0]) if declarer: ba.add_loss(agent_state_2, agent_card_played[1]) # for", "12. 
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''\"\"\" State = namedtuple('State', ['opening_suit', 'cards_played', 'partners_card']) \"\"\"''''''''''''''''''''''''''''''''''''''''''''''''''''''''' \"", "+= 1 \"\"\" This function checks for any red flags", "| partner's card {}\\nBest Card To Play: {}\\n\\n\".format(state.opening_suit, state.cards_played, state.partners_card,", "counts the points in each hand. Note: Ace is 12,", "def play_game(game, hands, train=False, ba=None, barf=None): partners_cards = copy.copy(hands[0]) agents_cards", "Representing the MDP with a Map from a \"State\" to", "\".csv\", 'a') as k: k.write( f\"{game + 1},\" f\"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] -", "the agent wins a round when there's three cards played", "i in all_valid_cards if i in hands[player_idx]]) if suit ==", "in cards_played]) agent_valid_cards = np.array([i for i in all_valid_cards if", "* 13) if i in cards_in_hand]) if len(valid_cards) == 0:", "<-- hopefully 24 has the highest weight. card_played = ba.play_card(state,", "# 5,000,000 self.game_num = 1 \"\"\" EXAMPLE state = State(1,", "copy.copy(hands[0]) agents_cards = copy.copy(hands[2]) declarer, d = agent_declarer(hands) \"\"\" hands[0]", "being chosen. self.weights = {} self.weights[State(-1, frozenset(), -1)] = np.full(NUM_ACTIONS,", "= [0] for game in range(iterations): hands = shuffle_cards() NS_Wins[-1]", "1 and max(valid_cards) in partners_cards ): self.RED_FLAG_TOTAL_COUNT[2] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[2]", "partners_cards ): self.RED_FLAG_TOTAL_COUNT[2] += 1 self.ALL_RED_FLAG_TOTAL_COUNT[2] += 1 if card", "0.999995 # 1,000,000 # self.alpha = 0.9999995 # 5,000,000 self.game_num", "Red Flag Ratios - Random: {average_rf_ratios_random}\") with open(str(NUM_GAMES_TRAIN) + \"_Game_Data_Avg_Train-\"", "what the agent played. \"\"\" def assess_card_played(self, hands, card, suit,", "played highest card, does agent play lowest card? do they", "this to be positive): {average_win_delta}\") print(f\"Average Red Flag Ratios -", "incrase the proportion with which we play 24. ba.add_win(state, card_played)" ]
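The representation block above is compact and easy to misread, so here is a short, self-contained sketch of how it plays out on concrete values. It is not part of the original project: the helper names card_name and high_card_points are made up for illustration, and the decay figures are just the arithmetic implied by the alpha comments in BridgeAgent.__init__.

# Minimal sketch (hypothetical helpers, not from the project above).
# card // 13 gives the suit (0=Clubs, 1=Diamonds, 2=Hearts, 3=Spades) and
# card % 13 gives the rank offset, so the printed rank is card % 13 + 2.
SUITS = ["Club", "Diamond", "Heart", "Spade"]

def card_name(card: int) -> str:
    return f"{SUITS[card // 13]} {card % 13 + 2}"

def high_card_points(hand) -> int:
    # Mirrors count_points above: Ace (card % 13 == 12) is 4, King 3, Queen 2, Jack 1.
    values = {12: 4, 11: 3, 10: 2, 9: 1}
    return sum(values.get(card % 13, 0) for card in hand)

print(card_name(23), "|", card_name(24), "|", card_name(25))
# -> Diamond 12 | Diamond 13 | Diamond 14
print(high_card_points([0, 11, 24, 25, 38, 51]))
# -> 18  (0 + 3 + 3 + 4 + 4 + 4)

# add_win/add_loss scale a weight by (1 + 0.1 * alpha ** game_num), so with
# alpha = 0.999995 (the "1,000,000 games" setting) the step size shrinks from
# about 1.1 at game 1 to about 1.0007 at game 1,000,000:
alpha = 0.999995
for game_num in (1, 100_000, 1_000_000):
    print(game_num, round(1 + 0.1 * alpha ** game_num, 6))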
[ "repository.data.installed_version = \"1\" hacs.repositories = [repository] hacs.hass = HomeAssistant() hacs.hass.config.config_dir", "= get_hacs() hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir await data.restore()", "HacsData from custom_components.hacs.helpers.classes.repository import HacsRepository from custom_components.hacs.hacsbase.configuration import Configuration from", "= [repository] hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration =", "import dummy_repository_base @pytest.mark.asyncio async def test_hacs_data_async_write1(tmpdir): data = HacsData() hacs", "hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration() await", "from tests.dummy_repository import dummy_repository_base @pytest.mark.asyncio async def test_hacs_data_async_write1(tmpdir): data =", "from homeassistant.core import HomeAssistant from custom_components.hacs.hacsbase.data import HacsData from custom_components.hacs.helpers.classes.repository", "Test Suite.\"\"\" from aiogithubapi.objects import repository import pytest import os", "dummy_repository_base() repository.data.installed = True repository.data.installed_version = \"1\" hacs.repositories = [repository]", "import HomeAssistant from custom_components.hacs.hacsbase.data import HacsData from custom_components.hacs.helpers.classes.repository import HacsRepository", "False await data.async_write() @pytest.mark.asyncio async def test_hacs_data_restore(tmpdir): data = HacsData()", "\"1\" hacs.repositories = [repository] hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir", "await data.async_write() @pytest.mark.asyncio async def test_hacs_data_restore(tmpdir): data = HacsData() hacs", "= True repository.data.installed_version = \"1\" hacs.repositories = [repository] hacs.hass =", "= get_hacs() hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration =", "HacsData() hacs = get_hacs() hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir", "@pytest.mark.asyncio async def test_hacs_data_async_write2(tmpdir): data = HacsData() hacs = get_hacs()", "hacs.system.disabled = False await data.async_write() @pytest.mark.asyncio async def test_hacs_data_restore(tmpdir): data", "data = HacsData() hacs = get_hacs() repository = dummy_repository_base() repository.data.installed", "repository.data.installed = True repository.data.installed_version = \"1\" hacs.repositories = [repository] hacs.hass", "import Configuration from custom_components.hacs.share import get_hacs from tests.dummy_repository import dummy_repository_base", "test_hacs_data_async_write2(tmpdir): data = HacsData() hacs = get_hacs() hacs.hass = HomeAssistant()", "repository import pytest import os from homeassistant.core import HomeAssistant from", "[repository] hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration()", "hacs.configuration = Configuration() hacs.system.status.background_task = False hacs.system.disabled = False await", "test_hacs_data_restore(tmpdir): data = HacsData() hacs = get_hacs() hacs.hass = HomeAssistant()", "test_hacs_data_async_write1(tmpdir): data = HacsData() hacs = get_hacs() repository = dummy_repository_base()", "\"\"\"Data Test Suite.\"\"\" from aiogithubapi.objects import repository import pytest import", "import os from homeassistant.core import HomeAssistant from custom_components.hacs.hacsbase.data import HacsData", "= Configuration() hacs.system.status.background_task = False 
hacs.system.disabled = False await data.async_write()", "hacs.system.status.background_task = False hacs.system.disabled = False await data.async_write() @pytest.mark.asyncio async", "Configuration from custom_components.hacs.share import get_hacs from tests.dummy_repository import dummy_repository_base @pytest.mark.asyncio", "= Configuration() await data.async_write() @pytest.mark.asyncio async def test_hacs_data_async_write2(tmpdir): data =", "hacs = get_hacs() repository = dummy_repository_base() repository.data.installed = True repository.data.installed_version", "import HacsRepository from custom_components.hacs.hacsbase.configuration import Configuration from custom_components.hacs.share import get_hacs", "data = HacsData() hacs = get_hacs() hacs.hass = HomeAssistant() hacs.hass.config.config_dir", "repository = dummy_repository_base() repository.data.installed = True repository.data.installed_version = \"1\" hacs.repositories", "= HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration() hacs.system.status.background_task =", "HacsRepository from custom_components.hacs.hacsbase.configuration import Configuration from custom_components.hacs.share import get_hacs from", "aiogithubapi.objects import repository import pytest import os from homeassistant.core import", "HacsData() hacs = get_hacs() repository = dummy_repository_base() repository.data.installed = True", "HomeAssistant from custom_components.hacs.hacsbase.data import HacsData from custom_components.hacs.helpers.classes.repository import HacsRepository from", "= HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration() await data.async_write()", "hacs.repositories = [repository] hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration", "hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration() await data.async_write() @pytest.mark.asyncio async", "async def test_hacs_data_restore(tmpdir): data = HacsData() hacs = get_hacs() hacs.hass", "custom_components.hacs.share import get_hacs from tests.dummy_repository import dummy_repository_base @pytest.mark.asyncio async def", "os from homeassistant.core import HomeAssistant from custom_components.hacs.hacsbase.data import HacsData from", "get_hacs from tests.dummy_repository import dummy_repository_base @pytest.mark.asyncio async def test_hacs_data_async_write1(tmpdir): data", "dummy_repository_base @pytest.mark.asyncio async def test_hacs_data_async_write1(tmpdir): data = HacsData() hacs =", "def test_hacs_data_async_write1(tmpdir): data = HacsData() hacs = get_hacs() repository =", "import get_hacs from tests.dummy_repository import dummy_repository_base @pytest.mark.asyncio async def test_hacs_data_async_write1(tmpdir):", "import pytest import os from homeassistant.core import HomeAssistant from custom_components.hacs.hacsbase.data", "from custom_components.hacs.hacsbase.data import HacsData from custom_components.hacs.helpers.classes.repository import HacsRepository from custom_components.hacs.hacsbase.configuration", "get_hacs() repository = dummy_repository_base() repository.data.installed = True repository.data.installed_version = \"1\"", "get_hacs() hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration()", "homeassistant.core import HomeAssistant from custom_components.hacs.hacsbase.data import HacsData from custom_components.hacs.helpers.classes.repository import", "@pytest.mark.asyncio async def 
test_hacs_data_async_write1(tmpdir): data = HacsData() hacs = get_hacs()", "await data.async_write() @pytest.mark.asyncio async def test_hacs_data_async_write2(tmpdir): data = HacsData() hacs", "import repository import pytest import os from homeassistant.core import HomeAssistant", "= False hacs.system.disabled = False await data.async_write() @pytest.mark.asyncio async def", "True repository.data.installed_version = \"1\" hacs.repositories = [repository] hacs.hass = HomeAssistant()", "@pytest.mark.asyncio async def test_hacs_data_restore(tmpdir): data = HacsData() hacs = get_hacs()", "def test_hacs_data_restore(tmpdir): data = HacsData() hacs = get_hacs() hacs.hass =", "from custom_components.hacs.hacsbase.configuration import Configuration from custom_components.hacs.share import get_hacs from tests.dummy_repository", "async def test_hacs_data_async_write2(tmpdir): data = HacsData() hacs = get_hacs() hacs.hass", "tmpdir hacs.configuration = Configuration() hacs.system.status.background_task = False hacs.system.disabled = False", "= tmpdir hacs.configuration = Configuration() hacs.system.status.background_task = False hacs.system.disabled =", "= tmpdir hacs.configuration = Configuration() await data.async_write() @pytest.mark.asyncio async def", "tests.dummy_repository import dummy_repository_base @pytest.mark.asyncio async def test_hacs_data_async_write1(tmpdir): data = HacsData()", "= get_hacs() repository = dummy_repository_base() repository.data.installed = True repository.data.installed_version =", "hacs = get_hacs() hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration", "hacs = get_hacs() hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir await", "= False await data.async_write() @pytest.mark.asyncio async def test_hacs_data_restore(tmpdir): data =", "hacs.configuration = Configuration() await data.async_write() @pytest.mark.asyncio async def test_hacs_data_async_write2(tmpdir): data", "def test_hacs_data_async_write2(tmpdir): data = HacsData() hacs = get_hacs() hacs.hass =", "data.async_write() @pytest.mark.asyncio async def test_hacs_data_restore(tmpdir): data = HacsData() hacs =", "= HacsData() hacs = get_hacs() hacs.hass = HomeAssistant() hacs.hass.config.config_dir =", "False hacs.system.disabled = False await data.async_write() @pytest.mark.asyncio async def test_hacs_data_restore(tmpdir):", "data.async_write() @pytest.mark.asyncio async def test_hacs_data_async_write2(tmpdir): data = HacsData() hacs =", "import HacsData from custom_components.hacs.helpers.classes.repository import HacsRepository from custom_components.hacs.hacsbase.configuration import Configuration", "HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration() await data.async_write() @pytest.mark.asyncio", "= HacsData() hacs = get_hacs() repository = dummy_repository_base() repository.data.installed =", "tmpdir hacs.configuration = Configuration() await data.async_write() @pytest.mark.asyncio async def test_hacs_data_async_write2(tmpdir):", "custom_components.hacs.helpers.classes.repository import HacsRepository from custom_components.hacs.hacsbase.configuration import Configuration from custom_components.hacs.share import", "pytest import os from homeassistant.core import HomeAssistant from custom_components.hacs.hacsbase.data import", "= \"1\" hacs.repositories = [repository] hacs.hass = HomeAssistant() hacs.hass.config.config_dir =", "Suite.\"\"\" from aiogithubapi.objects import repository import pytest import os from", 
"hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration() hacs.system.status.background_task = False hacs.system.disabled", "Configuration() await data.async_write() @pytest.mark.asyncio async def test_hacs_data_async_write2(tmpdir): data = HacsData()", "= dummy_repository_base() repository.data.installed = True repository.data.installed_version = \"1\" hacs.repositories =", "from custom_components.hacs.helpers.classes.repository import HacsRepository from custom_components.hacs.hacsbase.configuration import Configuration from custom_components.hacs.share", "from custom_components.hacs.share import get_hacs from tests.dummy_repository import dummy_repository_base @pytest.mark.asyncio async", "hacs.hass = HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration() hacs.system.status.background_task", "from aiogithubapi.objects import repository import pytest import os from homeassistant.core", "custom_components.hacs.hacsbase.configuration import Configuration from custom_components.hacs.share import get_hacs from tests.dummy_repository import", "async def test_hacs_data_async_write1(tmpdir): data = HacsData() hacs = get_hacs() repository", "HomeAssistant() hacs.hass.config.config_dir = tmpdir hacs.configuration = Configuration() hacs.system.status.background_task = False", "custom_components.hacs.hacsbase.data import HacsData from custom_components.hacs.helpers.classes.repository import HacsRepository from custom_components.hacs.hacsbase.configuration import", "Configuration() hacs.system.status.background_task = False hacs.system.disabled = False await data.async_write() @pytest.mark.asyncio" ]